Index: src/spaces.h
===================================================================
--- src/spaces.h (revision 4722)
+++ src/spaces.h (working copy)
@@ -45,47 +45,24 @@
// The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
-// spaces consists of a list of pages. A page has a page header and an object
-// area. A page size is deliberately chosen as 8K bytes.
-// The first word of a page is an opaque page header that has the
+// spaces consist of a list of pages. A page has a page header, a remembered
+// set area, and an object area. The page size is deliberately chosen as 8K
+// bytes. The first word of a page is an opaque page header that has the
// address of the next page and its ownership information. The second word may
-// have the allocation top address of this page. Heap objects are aligned to the
-// pointer size.
+// have the allocation top address of this page. The next 248 bytes are
+// remembered sets. Heap objects are aligned to the pointer size (4 bytes), and
+// each remembered set bit corresponds to one pointer-aligned word in the
+// object area.
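+//
+// An illustrative sketch of the resulting 32-bit page layout (derived from
+// the numbers above):
+//
+//   page offset   contents
+//   0 .. 7        opaque header and allocation top (two words)
+//   8 .. 255      remembered set (8192 / 32 - 8 = 248 bytes)
+//   256 .. 8191   object area (8192 - 256 = 7936 bytes)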
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
-// collection. The large object space is paged. Pages in large object space
-// may be larger than 8K.
+// collection. The large object space is paged and uses the same remembered
+// set implementation. Pages in large object space may be larger than 8K.
//
-// A card marking write barrier is used to keep track of intergenerational
-// references. Old space pages are divided into regions of Page::kRegionSize
-// size. Each region has a corresponding dirty bit in the page header which is
-// set if the region might contain pointers to new space. For details about
-// dirty bits encoding see comments in the Page::GetRegionNumberForAddress()
-// method body.
-//
-// During scavenges and mark-sweep collections we iterate intergenerational
-// pointers without decoding heap object maps so if the page belongs to old
-// pointer space or large object space it is essential to guarantee that
-// the page does not contain any garbage pointers to new space: every pointer
-// aligned word which satisfies the Heap::InNewSpace() predicate must be a
-// pointer to a live heap object in new space. Thus objects in old pointer
-// and large object spaces should have a special layout (e.g. no bare integer
-// fields). This requirement does not apply to map space which is iterated in
-// a special fashion. However we still require pointer fields of dead maps to
-// be cleaned.
-//
-// To enable lazy cleaning of old space pages we use a notion of allocation
-// watermark. Every pointer under watermark is considered to be well formed.
-// Page allocation watermark is not necessarily equal to page allocation top but
-// all alive objects on page should reside under allocation watermark.
-// During scavenge allocation watermark might be bumped and invalid pointers
-// might appear below it. To avoid following them we store a valid watermark
-// into special field in the page header and set a page WATERMARK_INVALIDATED
-// flag. For details see comments in the Page::SetAllocationWatermark() method
-// body.
-//
+// NOTE: The mark-compact collector rebuilds the remembered set after a
+// collection. It reuses the first few words of the remembered set for
+// bookkeeping relocation information.
+
// Some assertion macros used in the debugging mode.
#define ASSERT_PAGE_ALIGNED(address) \
@@ -114,13 +91,25 @@
// -----------------------------------------------------------------------------
// A page normally has 8K bytes. Large object pages may be larger. A page
-// address is always aligned to the 8K page size.
+// address is always aligned to the 8K page size. A page is divided into
+// three areas: the first two words are used for bookkeeping, the next 248
+// bytes are used as the remembered set, and the rest of the page is the
+// object area.
//
-// Each page starts with a header of Page::kPageHeaderSize size which contains
-// bookkeeping data.
+// Pointers are aligned to the pointer size (4 bytes), so only one bit is
+// needed per pointer-aligned word in the remembered set. Given an address,
+// the byte holding its remembered set bit (as an offset from the start of the
+// page) is found by dividing its page offset by 32. Therefore, the object
+// area in a page starts at the 256th byte (8K/32). Bytes 0 to 255 never hold
+// object pointers and need no remembered set bits, so the first two words
+// (64 bits) in a page can be used for other purposes.
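+//
+// For example (illustrative): a pointer slot at page offset 0x1000 is covered
+// by remembered set bit number 0x1000 / 4 = 1024; that bit lives at page
+// offset 0x1000 / 32 = 0x80, as bit 1024 % 32 = 0 of the uint32_t word there.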
//
+// On the 64-bit platform pointers are aligned to the 8-byte pointer size, so
+// only 128 bytes are needed for the remembered set, and only the two bytes of
+// remembered set covering the remembered set itself come free. That is not
+// enough room for the page header, so we add an offset to the start of the
+// remembered set to make room for the Page data.
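+//
+// Illustratively: 8192 bytes / (8 bytes per slot * 8 bits per byte) = 128
+// bytes of remembered set, and each remembered set byte covers 64 bytes of
+// page, so only 128 / 64 = 2 bytes come free; kRSetOffset (4 * kPointerSize =
+// 32 bytes) reserves the header room instead.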
+// |
// The mark-compact collector transforms a map pointer into a page index and a |
-// page offset. The exact encoding is described in the comments for |
+// page offset. The excact encoding is described in the comments for |
// class MapWord in objects.h. |
// |
// The only way to get a page pointer is by calling factory methods: |
@@ -161,25 +150,18 @@
// Return the end of allocation in this page. Undefined for unused pages.
inline Address AllocationTop();
- // Return the allocation watermark for the page.
- // For old space pages it is guaranteed that the area under the watermark
- // does not contain any garbage pointers to new space.
- inline Address AllocationWatermark();
-
- // Return the allocation watermark offset from the beginning of the page.
- inline uint32_t AllocationWatermarkOffset();
-
- inline void SetAllocationWatermark(Address allocation_watermark);
-
- inline void SetCachedAllocationWatermark(Address allocation_watermark);
- inline Address CachedAllocationWatermark();
-
// Returns the start address of the object area in this page.
Address ObjectAreaStart() { return address() + kObjectStartOffset; }
// Returns the end address (exclusive) of the object area in this page.
Address ObjectAreaEnd() { return address() + Page::kPageSize; }
+ // Returns the start address of the remembered set area.
+ Address RSetStart() { return address() + kRSetStartOffset; }
+
+ // Returns the end address of the remembered set area (exclusive).
+ Address RSetEnd() { return address() + kRSetEndOffset; }
+
// Checks whether an address is page aligned.
static bool IsAlignedToPageSize(Address a) {
return 0 == (OffsetFrom(a) & kPageAlignmentMask);
@@ -211,100 +193,74 @@
}
// ---------------------------------------------------------------------
- // Card marking support
+ // Remembered set support
- static const uint32_t kAllRegionsCleanMarks = 0x0;
- static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;
+ // Clears the remembered set in this page.
+ inline void ClearRSet();
- inline uint32_t GetRegionMarks();
- inline void SetRegionMarks(uint32_t dirty);
+ // Returns the address of the remembered set word corresponding to an
+ // object address/offset pair, and the bit encoded as a single-bit
+ // mask in the output parameter 'bitmask'.
+ INLINE(static Address ComputeRSetBitPosition(Address address, int offset,
+ uint32_t* bitmask));
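+ // A minimal usage sketch (illustrative; assumes the Memory::uint32_at
+ // accessor and a pointer-aligned 'slot' address inside an object):
+ //   uint32_t bitmask = 0;
+ //   Address rset_word = ComputeRSetBitPosition(slot, 0, &bitmask);
+ //   bool is_set = (Memory::uint32_at(rset_word) & bitmask) != 0;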
- inline uint32_t GetRegionMaskForAddress(Address addr);
- inline int GetRegionNumberForAddress(Address addr);
+ // Sets the corresponding remembered set bit for a given address.
+ INLINE(static void SetRSet(Address address, int offset));
- inline void MarkRegionDirty(Address addr);
- inline bool IsRegionDirty(Address addr);
+ // Clears the corresponding remembered set bit for a given address.
+ static inline void UnsetRSet(Address address, int offset);
- inline void ClearRegionMarks(Address start,
- Address end,
- bool reaches_limit);
+ // Checks whether the remembered set bit for a given address is set.
+ static inline bool IsRSetSet(Address address, int offset);
+#ifdef DEBUG
+ // A state that marks whether the remembered set space can be reused for
+ // other purposes.
+ enum RSetState { IN_USE, NOT_IN_USE };
+ static bool is_rset_in_use() { return rset_state_ == IN_USE; }
+ static void set_rset_state(RSetState state) { rset_state_ = state; }
+#endif
+
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
- static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
- kIntSize + kPointerSize;
+ // The offset of the remembered set in a page. It is zero unless the bytes
+ // freed up by the remembered set bits covering the remembered set itself
+ // are too few to hold the page header.
+#ifdef V8_TARGET_ARCH_X64
+ static const int kRSetOffset = 4 * kPointerSize; // Room for four pointers.
+#else
+ static const int kRSetOffset = 0;
+#endif
+ // The end offset of the remembered set in a page
+ // (heap objects are aligned to the pointer size).
+ static const int kRSetEndOffset = kRSetOffset + kPageSize / kBitsPerPointer;
// The start offset of the object area in a page.
- static const int kObjectStartOffset = MAP_POINTER_ALIGN(kPageHeaderSize);
+ // This needs to be at least kRSetEndOffset, and a multiple of
+ // (bits per uint32_t) * kPointerSize, so that the start of the used
+ // remembered set is aligned to a uint32_t address.
+ static const int kObjectStartOffset = 256;
+ // The start offset of the used part of the remembered set in a page.
+ static const int kRSetStartOffset = kRSetOffset +
+ kObjectStartOffset / kBitsPerPointer;
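+ // With the 32-bit values above (illustrative): kRSetOffset = 0,
+ // kRSetEndOffset = 8192 / 32 = 256, and kRSetStartOffset = 256 / 32 = 8,
+ // i.e. the used remembered set occupies page bytes 8..255.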
+
// Object area size in bytes.
static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
// Maximum object size that fits in a page.
static const int kMaxHeapObjectSize = kObjectAreaSize;
- static const int kDirtyFlagOffset = 2 * kPointerSize;
- static const int kRegionSizeLog2 = 8;
- static const int kRegionSize = 1 << kRegionSizeLog2;
- static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);
-
- STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
-
enum PageFlag {
IS_NORMAL_PAGE = 1 << 0,
- WAS_IN_USE_BEFORE_MC = 1 << 1,
-
- // Page allocation watermark was bumped by preallocation during scavenge.
- // Correct watermark can be retrieved by CachedAllocationWatermark() method
- WATERMARK_INVALIDATED = 1 << 2
+ WAS_IN_USE_BEFORE_MC = 1 << 1
};
- // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
- // scavenge we just invalidate the watermark on each old space page after
- // processing it. And then we flip the meaning of the WATERMARK_INVALIDATED
- // flag at the beginning of the next scavenge and each page becomes marked as
- // having a valid watermark.
- //
- // The following invariant must hold for pages in old pointer and map spaces:
- // If page is in use then page is marked as having invalid watermark at
- // the beginning and at the end of any GC.
- //
- // This invariant guarantees that after flipping flag meaning at the
- // beginning of scavenge all pages in use will be marked as having valid
- // watermark.
- static inline void FlipMeaningOfInvalidatedWatermarkFlag();
-
- // Returns true if the page allocation watermark was not altered during
- // scavenge.
- inline bool IsWatermarkValid();
-
- inline void InvalidateWatermark(bool value);
-
inline bool GetPageFlag(PageFlag flag);
inline void SetPageFlag(PageFlag flag, bool value);
- inline void ClearPageFlags();
- static const int kAllocationWatermarkOffsetShift = 3;
- static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
- static const uint32_t kAllocationWatermarkOffsetMask =
- ((1 << kAllocationWatermarkOffsetBits) - 1) <<
- kAllocationWatermarkOffsetShift;
-
- static const uint32_t kFlagsMask =
- ((1 << kAllocationWatermarkOffsetShift) - 1);
-
- STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
- kAllocationWatermarkOffsetBits);
-
- // This field contains the meaning of the WATERMARK_INVALIDATED flag.
- // Instead of clearing this flag from all pages we just flip
- // its meaning at the beginning of a scavenge.
- static intptr_t watermark_invalidated_mark_;
-
//---------------------------------------------------------------------------
// Page header description.
//
@@ -323,24 +279,26 @@
// second word *may* (if the page start and large object chunk start are
// the same) contain the large object chunk size. In either case, the
// low-order bit for large object pages will be cleared.
- // For normal pages this word is used to store page flags and
- // offset of allocation top.
- intptr_t flags_;
+ // For normal pages this word is used to store various page flags.
+ int flags;
- // This field contains dirty marks for regions covering the page. Only dirty
- // regions might contain intergenerational references.
- // Only 32 dirty marks are supported so for large object pages several regions
- // might be mapped to a single dirty mark.
- uint32_t dirty_regions_;
+ // The following fields may overlap with the remembered set; they can only
+ // be used by the mark-compact collector, when the remembered set is not in
+ // use.
// The index of the page in its owner space.
int mc_page_index;
- // During mark-compact collections this field contains the forwarding address
- // of the first live object in this page.
- // During scavenge collection this field is used to store allocation watermark
- // if it is altered during scavenge.
+ // The allocation pointer after relocating objects to this page.
+ Address mc_relocation_top;
+
+ // The forwarding address of the first live object in this page.
Address mc_first_forwarded;
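+ // Illustratively (32-bit layout): mc_page_index, mc_relocation_top, and
+ // mc_first_forwarded occupy the twelve bytes at page offsets 8..19, inside
+ // the 8..255 remembered set area, hence the restriction above.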
+
+#ifdef DEBUG
+ private:
+ static RSetState rset_state_; // state of the remembered set
+#endif
};
@@ -963,7 +921,8 @@
// Checks whether page is currently in use by this space.
bool IsUsed(Page* page);
- void MarkAllPagesClean();
+ // Clears the remembered sets of all pages in this space.
+ void ClearRSet();
// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact(bool will_compact);
@@ -977,11 +936,6 @@
// The limit of allocation for a page in this space.
virtual Address PageAllocationLimit(Page* page) = 0;
- void FlushTopPageWatermark() {
- AllocationTopPage()->SetCachedAllocationWatermark(top());
- AllocationTopPage()->InvalidateWatermark(true);
- }
-
// Current capacity without growing (Size() + Available() + Waste()).
int Capacity() { return accounting_stats_.Capacity(); }
@@ -1036,8 +990,7 @@
// Writes relocation info to the top page.
void MCWriteRelocationInfoToPage() {
- TopPageOf(mc_forwarding_info_)->
- SetAllocationWatermark(mc_forwarding_info_.top);
+ TopPageOf(mc_forwarding_info_)->mc_relocation_top = mc_forwarding_info_.top;
}
// Computes the offset of a given address in this space to the beginning
@@ -1155,6 +1108,8 @@
#ifdef DEBUG
// Returns the number of total pages in this space.
int CountTotalPages();
+
+ void DoPrintRSet(const char* space_name);
#endif
private:
@@ -1807,6 +1762,8 @@
#ifdef DEBUG
// Reports statistics for the space
void ReportStatistics();
+ // Dump the remembered sets in the space to stdout.
+ void PrintRSet();
#endif
protected:
@@ -1871,6 +1828,9 @@
#ifdef DEBUG
// Reports statistic info of the space
void ReportStatistics();
+
+ // Dump the remembered sets in the space to stdout.
+ void PrintRSet();
#endif
protected:
@@ -1939,11 +1899,11 @@
PageIterator it(this, PageIterator::ALL_PAGES);
while (pages_left-- > 0) {
ASSERT(it.has_next());
- it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
+ it.next()->ClearRSet();
}
ASSERT(it.has_next());
Page* top_page = it.next();
- top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
+ top_page->ClearRSet();
ASSERT(top_page->is_valid());
int offset = live_maps % kMapsPerPage * Map::kSize;
@@ -2034,8 +1994,9 @@
public:
// Allocates a new LargeObjectChunk that contains a large object page
// (Page::kPageSize aligned) that has at least size_in_bytes (for a large
- // object) bytes after the object area start of that page.
- // The allocated chunk size is set in the output parameter chunk_size.
+ // object and possibly extra remembered set words) bytes after the object
+ // area start of that page. The allocated chunk size is set in the output
+ // parameter chunk_size.
static LargeObjectChunk* New(int size_in_bytes,
size_t* chunk_size,
Executability executable);
@@ -2058,12 +2019,16 @@
// Returns the object in this chunk.
inline HeapObject* GetObject();
- // Given a requested size returns the physical size of a chunk to be
- // allocated.
+ // Given a requested size (including any extra remembered set words),
+ // returns the physical size of a chunk to be allocated.
static int ChunkSizeFor(int size_in_bytes);
- // Given a chunk size, returns the object size it can accommodate. Used by
- // LargeObjectSpace::Available.
+ // Given a chunk size, returns the object size it can accommodate (not
+ // including any extra remembered set words). Used by
+ // LargeObjectSpace::Available. Note that this can overestimate the size
+ // of object that will fit in a chunk: if the object requires extra
+ // remembered set words (e.g., for large fixed arrays), the actual object
+ // size for the chunk will be smaller than reported by this function.
static int ObjectSizeFor(int chunk_size) {
if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
}
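+ // For example (illustrative, 32-bit): ObjectSizeFor(3 * Page::kPageSize)
+ // = 24576 - 8192 - 256 = 16128 bytes of usable object size.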
@@ -2099,7 +2064,8 @@
// Allocates a large FixedArray.
Object* AllocateRawFixedArray(int size_in_bytes);
- // Available bytes for objects in this space.
+ // Available bytes for objects in this space, not including any extra
+ // remembered set words.
int Available() {
return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
}
@@ -2117,9 +2083,12 @@
// space, may be slow.
Object* FindObject(Address a);
- // Iterates objects covered by dirty regions.
- void IterateDirtyRegions(ObjectSlotCallback func);
+ // Clears remembered sets.
+ void ClearRSet();
+ // Iterates objects whose remembered set bits are set.
+ void IterateRSet(ObjectSlotCallback func);
+
// Frees unmarked objects.
void FreeUnmarkedObjects();
@@ -2145,6 +2114,8 @@
virtual void Print();
void ReportStatistics();
void CollectCodeStatistics();
+ // Dump the remembered sets in the space to stdout.
+ void PrintRSet();
#endif
// Checks whether an address is in the object area in this space. It
// iterates all objects in the space. May be slow.
@@ -2163,6 +2134,10 @@
int object_size,
Executability executable);
+ // Returns the number of extra remembered set bytes (rounded up to the
+ // nearest full word) required to cover extra_object_bytes of pointer data.
+ static inline int ExtraRSetBytesFor(int extra_object_bytes);
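+ // A minimal sketch of the intended arithmetic (illustrative; the actual
+ // inline definition lives elsewhere):
+ //   extra bits  = extra_object_bytes / kPointerSize;
+ //   extra bytes = RoundUp(extra bits, kBitsPerInt) / kBitsPerByte;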
+
friend class LargeObjectIterator;
public: