OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
7 | 7 |
8 #include <list> | 8 #include <list> |
9 #include <memory> | 9 #include <memory> |
10 #include <unordered_set> | 10 #include <unordered_set> |
11 | 11 |
12 #include "src/allocation.h" | 12 #include "src/allocation.h" |
13 #include "src/base/atomic-utils.h" | 13 #include "src/base/atomic-utils.h" |
14 #include "src/base/atomicops.h" | 14 #include "src/base/atomicops.h" |
15 #include "src/base/bits.h" | 15 #include "src/base/bits.h" |
16 #include "src/base/hashmap.h" | 16 #include "src/base/hashmap.h" |
17 #include "src/base/platform/mutex.h" | 17 #include "src/base/platform/mutex.h" |
18 #include "src/flags.h" | 18 #include "src/flags.h" |
| 19 #include "src/globals.h" |
19 #include "src/heap/marking.h" | 20 #include "src/heap/marking.h" |
20 #include "src/list.h" | 21 #include "src/list.h" |
21 #include "src/objects.h" | 22 #include "src/objects.h" |
22 #include "src/utils.h" | 23 #include "src/utils.h" |
23 | 24 |
24 namespace v8 { | 25 namespace v8 { |
25 namespace internal { | 26 namespace internal { |
26 | 27 |
27 class AllocationInfo; | 28 class AllocationInfo; |
28 class AllocationObserver; | 29 class AllocationObserver; |
(...skipping 21 matching lines...)
50 // scavenger implements Cheney's copying algorithm. The old generation is | 51 // scavenger implements Cheney's copying algorithm. The old generation is |
51 // separated into a map space and an old object space. The map space contains | 52 // separated into a map space and an old object space. The map space contains |
52 // all (and only) map objects; the rest of the old objects go into the old space. | 53 // all (and only) map objects; the rest of the old objects go into the old space. |
53 // The old generation is collected by a mark-sweep-compact collector. | 54 // The old generation is collected by a mark-sweep-compact collector. |
54 // | 55 // |
55 // The semispaces of the young generation are contiguous. The old and map | 56 // The semispaces of the young generation are contiguous. The old and map |
56 // spaces consist of a list of pages. A page has a page header and an object | 57 // spaces consist of a list of pages. A page has a page header and an object |
57 // area. | 58 // area. |
58 // | 59 // |
59 // There is a separate large object space for objects larger than | 60 // There is a separate large object space for objects larger than |
60 // Page::kMaxRegularHeapObjectSize, so that they do not have to move during | 61 // kMaxRegularHeapObjectSize, so that they do not have to move during |
61 // collection. The large object space is paged. Pages in large object space | 62 // collection. The large object space is paged. Pages in large object space |
62 // may be larger than the page size. | 63 // may be larger than the page size. |
63 // | 64 // |
64 // A store-buffer based write barrier is used to keep track of intergenerational | 65 // A store-buffer based write barrier is used to keep track of intergenerational |
65 // references. See heap/store-buffer.h. | 66 // references. See heap/store-buffer.h. |
66 // | 67 // |
67 // During scavenges and mark-sweep collections we sometimes (after a store | 68 // During scavenges and mark-sweep collections we sometimes (after a store |
68 // buffer overflow) iterate intergenerational pointers without decoding heap | 69 // buffer overflow) iterate intergenerational pointers without decoding heap |
69 // object maps, so if the page belongs to old space or large object space | 70 // object maps, so if the page belongs to old space or large object space |
70 // it is essential to guarantee that the page does not contain any | 71 // it is essential to guarantee that the page does not contain any |
(...skipping 27 matching lines...)
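The comment above names Cheney's copying algorithm as the scavenger's strategy. As a reviewer's aside, a minimal generic sketch of that algorithm follows; it is illustrative only (every name here is hypothetical, the semispace flip is omitted, and V8's real scavenger is considerably more involved):

  #include <cstddef>
  #include <cstring>

  // Hypothetical object layout: a header holding a forwarding pointer and
  // a size, followed by `num_fields` outgoing pointers.
  struct Obj {
    Obj* forwarding;    // non-null once the object has been evacuated
    size_t size;        // total size in bytes, header included
    size_t num_fields;  // number of pointer fields after the header
    Obj** fields() { return reinterpret_cast<Obj**>(this + 1); }
  };

  char* alloc_ptr;  // bump pointer into to-space

  Obj* Evacuate(Obj* obj) {
    if (obj == nullptr) return nullptr;
    if (obj->forwarding != nullptr) return obj->forwarding;  // already moved
    Obj* copy = reinterpret_cast<Obj*>(alloc_ptr);
    std::memcpy(copy, obj, obj->size);
    alloc_ptr += obj->size;
    obj->forwarding = copy;  // leave a forwarding pointer in from-space
    return copy;
  }

  void Scavenge(Obj** roots, size_t num_roots) {
    char* scan = alloc_ptr;
    for (size_t i = 0; i < num_roots; i++) roots[i] = Evacuate(roots[i]);
    // Cheney's trick: the copied objects themselves form the worklist,
    // so no auxiliary stack or queue is needed.
    while (scan < alloc_ptr) {
      Obj* obj = reinterpret_cast<Obj*>(scan);
      for (size_t i = 0; i < obj->num_fields; i++)
        obj->fields()[i] = Evacuate(obj->fields()[i]);
      scan += obj->size;
    }
  }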
98 | 99 |
99 // Some assertion macros used in the debugging mode. | 100 // Some assertion macros used in the debugging mode. |
100 | 101 |
101 #define DCHECK_PAGE_ALIGNED(address) \ | 102 #define DCHECK_PAGE_ALIGNED(address) \ |
102 DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0) | 103 DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0) |
103 | 104 |
104 #define DCHECK_OBJECT_ALIGNED(address) \ | 105 #define DCHECK_OBJECT_ALIGNED(address) \ |
105 DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0) | 106 DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0) |
106 | 107 |
107 #define DCHECK_OBJECT_SIZE(size) \ | 108 #define DCHECK_OBJECT_SIZE(size) \ |
108 DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize)) | 109 DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize)) |
109 | 110 |
110 #define DCHECK_CODEOBJECT_SIZE(size, code_space) \ | 111 #define DCHECK_CODEOBJECT_SIZE(size, code_space) \ |
111 DCHECK((0 < size) && (size <= code_space->AreaSize())) | 112 DCHECK((0 < size) && (size <= code_space->AreaSize())) |
112 | 113 |
113 #define DCHECK_PAGE_OFFSET(offset) \ | 114 #define DCHECK_PAGE_OFFSET(offset) \ |
114 DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize)) | 115 DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize)) |
115 | 116 |
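The alignment DCHECKs above all reduce to the same power-of-two test: mask with (alignment - 1) and compare against zero. A standalone sketch of the arithmetic, with an illustrative kPageSize standing in for Page::kPageSize:

  #include <cstdint>

  constexpr uintptr_t kPageSize = uintptr_t{1} << 20;  // assumed 1 MB page
  constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;

  // An address aligned to a power-of-two boundary has all its low bits clear.
  bool IsPageAligned(uintptr_t address) {
    return (address & kPageAlignmentMask) == 0;
  }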
116 enum FreeListCategoryType { | 117 enum FreeListCategoryType { |
117 kTiniest, | 118 kTiniest, |
118 kTiny, | 119 kTiny, |
(...skipping 568 matching lines...)
687 | 688 |
688 private: | 689 private: |
689 void InitializeReservedMemory() { reservation_.Reset(); } | 690 void InitializeReservedMemory() { reservation_.Reset(); } |
690 | 691 |
691 friend class MemoryAllocator; | 692 friend class MemoryAllocator; |
692 friend class MemoryChunkValidator; | 693 friend class MemoryChunkValidator; |
693 }; | 694 }; |
694 | 695 |
695 DEFINE_OPERATORS_FOR_FLAGS(MemoryChunk::Flags) | 696 DEFINE_OPERATORS_FOR_FLAGS(MemoryChunk::Flags) |
696 | 697 |
| 698 STATIC_ASSERT(kMaxRegularHeapObjectSize < MemoryChunk::kAllocatableMemory); |
| 699 |
697 // ----------------------------------------------------------------------------- | 700 // ----------------------------------------------------------------------------- |
698 // A page is a memory chunk of size 1 MB. Large object pages may be larger. | 701 // A page is a memory chunk of size 1 MB. Large object pages may be larger. |
699 // | 702 // |
700 // The only way to get a page pointer is by calling factory methods: | 703 // The only way to get a page pointer is by calling factory methods: |
701 // Page* p = Page::FromAddress(addr); or | 704 // Page* p = Page::FromAddress(addr); or |
702 // Page* p = Page::FromTopOrLimit(top); | 705 // Page* p = Page::FromTopOrLimit(top); |
703 class Page : public MemoryChunk { | 706 class Page : public MemoryChunk { |
704 public: | 707 public: |
705 static const intptr_t kCopyAllFlags = ~0; | 708 static const intptr_t kCopyAllFlags = ~0; |
706 | 709 |
707 // Page flags copied from from-space to to-space when flipping semispaces. | 710 // Page flags copied from from-space to to-space when flipping semispaces. |
708 static const intptr_t kCopyOnFlipFlagsMask = | 711 static const intptr_t kCopyOnFlipFlagsMask = |
709 static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | | 712 static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | |
710 static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); | 713 static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); |
711 | 714 |
712 // Maximum object size that gets allocated into regular pages. Objects larger | |
713 // than that size are allocated in large object space and are never moved in | |
714 // memory. This also applies to new space allocation, since objects are never | |
715 // migrated from new space to large object space. Takes double alignment into | |
716 // account. | |
717 // TODO(hpayer): This limit should be way smaller but we currently have | |
718 // short living objects >256K. | |
719 static const int kMaxRegularHeapObjectSize = 512 * KB - Page::kHeaderSize; | |
720 | |
721 static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner); | 715 static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner); |
722 | 716 |
723 // Returns the page containing a given address. The address must be in the | 717 // Returns the page containing a given address. The address must be in the |
724 // range [page_addr, page_addr + kPageSize). This only works if the object | 718 // range [page_addr, page_addr + kPageSize). This only works if the object |
725 // is in fact in a page. | 719 // is in fact in a page. |
726 static Page* FromAddress(Address addr) { | 720 static Page* FromAddress(Address addr) { |
727 return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask); | 721 return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask); |
728 } | 722 } |
729 | 723 |
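FromAddress depends on pages being allocated at kPageSize-aligned boundaries: clearing the low bits of any interior address recovers the page start. A hedged standalone illustration of that masking, assuming the 1 MB page size stated in the comment above:

  #include <cassert>
  #include <cstdint>

  constexpr uintptr_t kPageSize = uintptr_t{1} << 20;
  constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;

  int main() {
    uintptr_t page_start = 5 * kPageSize;      // pages start on aligned boundaries
    uintptr_t interior = page_start + 0x1234;  // any address within the page
    // The same masking FromAddress performs: round down to the page boundary.
    assert((interior & ~kPageAlignmentMask) == page_start);
  }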
730 // Returns the page containing the address provided. The address can | 724 // Returns the page containing the address provided. The address can |
(...skipping 2155 matching lines...)
2886 } | 2880 } |
2887 } | 2881 } |
2888 | 2882 |
2889 #ifdef VERIFY_HEAP | 2883 #ifdef VERIFY_HEAP |
2890 void VerifyObject(HeapObject* obj) override; | 2884 void VerifyObject(HeapObject* obj) override; |
2891 #endif | 2885 #endif |
2892 }; | 2886 }; |
2893 | 2887 |
2894 | 2888 |
2895 // ----------------------------------------------------------------------------- | 2889 // ----------------------------------------------------------------------------- |
2896 // Large objects ( > Page::kMaxRegularHeapObjectSize ) are allocated and | 2890 // Large objects ( > kMaxRegularHeapObjectSize ) are allocated and |
2897 // managed by the large object space. A large object is allocated from OS | 2891 // managed by the large object space. A large object is allocated from OS |
2898 // heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset). | 2892 // heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset). |
2899 // A large object always starts at Page::kObjectStartOffset within a page. | 2893 // A large object always starts at Page::kObjectStartOffset within a page. |
2900 // Large objects do not move during garbage collections. | 2894 // Large objects do not move during garbage collections. |
2901 | 2895 |
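The threshold named in the comment is exactly what routes an allocation to this space. A minimal sketch of that routing decision, assuming the pre-move definition of the constant shown earlier in this diff (the header-size value below is a hypothetical stand-in for Page::kHeaderSize):

  #include <cstddef>

  constexpr size_t kPageHeaderSize = 256;  // hypothetical stand-in
  constexpr size_t kMaxRegularHeapObjectSize = 512 * 1024 - kPageHeaderSize;

  // Anything above the regular-object limit is never placed on a normal
  // page, so it never has to move during a collection.
  bool GoesToLargeObjectSpace(size_t object_size) {
    return object_size > kMaxRegularHeapObjectSize;
  }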
2902 class LargeObjectSpace : public Space { | 2896 class LargeObjectSpace : public Space { |
2903 public: | 2897 public: |
2904 typedef LargePageIterator iterator; | 2898 typedef LargePageIterator iterator; |
2905 | 2899 |
2906 LargeObjectSpace(Heap* heap, AllocationSpace id); | 2900 LargeObjectSpace(Heap* heap, AllocationSpace id); |
(...skipping 132 matching lines...)
3039 count = 0; | 3033 count = 0; |
3040 } | 3034 } |
3041 // Must be small, since an iteration is used for lookup. | 3035 // Must be small, since an iteration is used for lookup. |
3042 static const int kMaxComments = 64; | 3036 static const int kMaxComments = 64; |
3043 }; | 3037 }; |
3044 #endif | 3038 #endif |
3045 } // namespace internal | 3039 } // namespace internal |
3046 } // namespace v8 | 3040 } // namespace v8 |
3047 | 3041 |
3048 #endif // V8_HEAP_SPACES_H_ | 3042 #endif // V8_HEAP_SPACES_H_ |