Index: src/spaces.cc
===================================================================
--- src/spaces.cc (revision 4722)
+++ src/spaces.cc (working copy)
@@ -41,7 +41,6 @@
          && (info).top <= (space).high() \
          && (info).limit == (space).high())
-intptr_t Page::watermark_invalidated_mark_ = Page::WATERMARK_INVALIDATED;
 // ----------------------------------------------------------------------------
 // HeapObjectIterator
@@ -140,6 +139,13 @@
 // -----------------------------------------------------------------------------
+// Page
+
+#ifdef DEBUG
+Page::RSetState Page::rset_state_ = Page::IN_USE;
+#endif
+
+// -----------------------------------------------------------------------------
 // CodeRange
 List<CodeRange::FreeBlock> CodeRange::free_list_(0);
@@ -518,10 +524,7 @@
   for (int i = 0; i < pages_in_chunk; i++) {
     Page* p = Page::FromAddress(page_addr);
     p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
-    p->InvalidateWatermark(true);
     p->SetIsLargeObjectPage(false);
-    p->SetAllocationWatermark(p->ObjectAreaStart());
-    p->SetCachedAllocationWatermark(p->ObjectAreaStart());
     page_addr += Page::kPageSize;
   }
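The loop above links pages through opaque_header, which packs the next page's page-aligned address together with the owning chunk id in one word. The accessors live outside this patch; a hedged sketch of the assumed decoding (helper names invented here):

// Page addresses are Page::kPageSize-aligned, so the low bits of the
// packed word are free to hold the chunk id.
static Address NextPageAddress(Page* p) {
  intptr_t raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
  return AddressFrom<Address>(raw_addr);
}

static int ChunkId(Page* p) {
  return static_cast<int>(p->opaque_header & Page::kPageAlignmentMask);
}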
@@ -678,7 +681,6 @@
     p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
     page_addr += Page::kPageSize;
-    p->InvalidateWatermark(true);
     if (p->WasInUseBeforeMC()) {
       *last_page_in_use = p;
     }
@@ -742,10 +744,10 @@
   accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
   ASSERT(Capacity() <= max_capacity_);
-  // Sequentially clear region marks in the newly allocated
+  // Sequentially initialize remembered sets in the newly allocated
   // pages and cache the current last page in the space.
   for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
-    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
+    p->ClearRSet();
     last_page_ = p;
   }
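Page::ClearRSet itself is defined outside this patch. Given the RSetStart()/RSetEnd() bounds used by the statistics and printing code later in the patch, it is presumably just a memset over the remembered-set words in the page header. A minimal sketch under that assumption:

// Sketch only: zero the remembered set, assumed to occupy
// [RSetStart(), RSetEnd()) within the page.
static void ClearPageRSetSketch(Page* p) {
  memset(p->RSetStart(), 0, p->RSetEnd() - p->RSetStart());
}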
@@ -792,10 +794,10 @@
 #endif
-void PagedSpace::MarkAllPagesClean() {
+void PagedSpace::ClearRSet() {
   PageIterator it(this, PageIterator::ALL_PAGES);
   while (it.has_next()) {
-    it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
+    it.next()->ClearRSet();
   }
 }
@@ -898,8 +900,7 @@
   // of forwarding addresses is as an offset in terms of live bytes, so we
   // need quick access to the allocation top of each page to decode
   // forwarding addresses.
-  current_page->SetAllocationWatermark(mc_forwarding_info_.top);
-  current_page->next_page()->InvalidateWatermark(true);
+  current_page->mc_relocation_top = mc_forwarding_info_.top;
   SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
   return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
 }
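Both the normal and the mark-compact allocation paths end in AllocateLinearly, which this patch does not touch. It is assumed to be the usual bump-pointer allocation over an AllocationInfo with top and limit fields; a minimal sketch:

// Sketch: bump the top pointer past the new object. The NULL result
// stands in for whatever failure path the real code takes when the
// page limit would be crossed.
static HeapObject* AllocateLinearlySketch(AllocationInfo* alloc_info,
                                          int size_in_bytes) {
  Address current_top = alloc_info->top;
  Address new_top = current_top + size_in_bytes;
  if (new_top > alloc_info->limit) return NULL;
  alloc_info->top = new_top;
  return HeapObject::FromAddress(current_top);
}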
@@ -927,10 +928,10 @@
     MemoryAllocator::SetNextPage(last_page, p);
-    // Sequentially clear region marks of new pages and and cache the
+    // Sequentially clear the remembered set of new pages and cache the
     // new last page in the space.
     while (p->is_valid()) {
-      p->SetRegionMarks(Page::kAllRegionsCleanMarks);
+      p->ClearRSet();
       last_page_ = p;
       p = p->next_page();
     }
@@ -1029,11 +1030,16 @@
   if (above_allocation_top) {
     // We don't care what's above the allocation top.
   } else {
+    // Unless this is the last page in the space containing allocated
+    // objects, the allocation top should be at a constant offset from the
+    // object area end.
     Address top = current_page->AllocationTop();
     if (current_page == top_page) {
       ASSERT(top == allocation_info_.top);
       // The next page will be above the allocation top.
       above_allocation_top = true;
+    } else {
+      ASSERT(top == PageAllocationLimit(current_page));
     }
     // It should be packed with objects from the bottom to the top.
@@ -1054,8 +1060,8 @@
       object->Verify();
       // All the interior pointers should be contained in the heap and
-      // have page regions covering intergenerational references should be
-      // marked dirty.
+      // have their remembered set bits set if required as determined
+      // by the visitor.
       int size = object->Size();
       object->IterateBody(map->instance_type(), size, visitor);
@@ -1114,7 +1120,7 @@
   start_ = start;
   address_mask_ = ~(size - 1);
-  object_mask_ = address_mask_ | kHeapObjectTagMask;
+  object_mask_ = address_mask_ | kHeapObjectTag;
   object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
   allocation_info_.top = to_space_.low();
@@ -1318,7 +1324,7 @@
   start_ = start;
   address_mask_ = ~(maximum_capacity - 1);
-  object_mask_ = address_mask_ | kHeapObjectTagMask;
+  object_mask_ = address_mask_ | kHeapObjectTag;
   object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
   age_mark_ = start_;
@@ -1628,7 +1634,7 @@
   // If the block is too small (eg, one or two words), to hold both a size
   // field and a next pointer, we give it a filler map that gives it the
   // correct size.
-  if (size_in_bytes > ByteArray::kHeaderSize) {
+  if (size_in_bytes > ByteArray::kAlignedSize) {
     set_map(Heap::raw_unchecked_byte_array_map());
     // Can't use ByteArray::cast because it fails during deserialization.
     ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
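The hunk above adjusts how a freed block is tagged so that heap iterators still see a valid object. A hedged sketch of the size-dependent tagging the surrounding comment describes (the helper name is invented; the filler-map accessors are assumed to exist alongside raw_unchecked_byte_array_map):

// Sketch: blocks big enough for a map word plus a length word become
// byte arrays; one- and two-word blocks get fixed-size filler maps.
static void TagFreeBlockSketch(HeapObject* block, int size_in_bytes) {
  if (size_in_bytes > ByteArray::kAlignedSize) {
    block->set_map(Heap::raw_unchecked_byte_array_map());
    reinterpret_cast<ByteArray*>(block)->set_length(
        ByteArray::LengthFor(size_in_bytes));
  } else if (size_in_bytes == kPointerSize) {
    block->set_map(Heap::raw_unchecked_one_pointer_filler_map());
  } else {
    block->set_map(Heap::raw_unchecked_two_pointer_filler_map());
  }
}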
@@ -1901,14 +1907,15 @@
     Page* p = it.next();
     // Space below the relocation pointer is allocated.
     computed_size +=
-        static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
+        static_cast<int>(p->mc_relocation_top - p->ObjectAreaStart());
     if (it.has_next()) {
-      // Free the space at the top of the page.
+      // Free the space at the top of the page. We cannot use
+      // p->mc_relocation_top after the call to Free (because Free will clear
+      // remembered set bits).
       int extra_size =
-          static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
+          static_cast<int>(p->ObjectAreaEnd() - p->mc_relocation_top);
       if (extra_size > 0) {
-        int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
-                                           extra_size);
+        int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
         // The bytes we have just "freed" to add to the free list were
         // already accounted as available.
         accounting_stats_.WasteBytes(wasted_bytes);
@@ -1956,10 +1963,7 @@
     // Clean them up.
     do {
-      first->InvalidateWatermark(true);
-      first->SetAllocationWatermark(first->ObjectAreaStart());
-      first->SetCachedAllocationWatermark(first->ObjectAreaStart());
-      first->SetRegionMarks(Page::kAllRegionsCleanMarks);
+      first->ClearRSet();
       first = first->next_page();
     } while (first != NULL);
@@ -1999,7 +2003,6 @@
     // Current allocation top points to a page which is now in the middle
     // of page list. We should move allocation top forward to the new last
     // used page so various object iterators will continue to work properly.
-    last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
     int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
                                          last_in_use->AllocationTop());
@@ -2032,7 +2035,6 @@
       int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
                                            p->ObjectAreaStart());
-      p->SetAllocationWatermark(p->ObjectAreaStart());
       Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes);
     }
   }
@@ -2064,7 +2066,6 @@
     if (!reserved_page->is_valid()) return false;
   }
   ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
-  TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
   SetAllocationInfo(&allocation_info_,
                     TopPageOf(allocation_info_)->next_page());
   return true;
@@ -2099,15 +2100,7 @@
     accounting_stats_.WasteBytes(wasted_bytes);
     if (!result->IsFailure()) {
       accounting_stats_.AllocateBytes(size_in_bytes);
-
-      HeapObject* obj = HeapObject::cast(result);
-      Page* p = Page::FromAddress(obj->address());
-
-      if (obj->address() >= p->AllocationWatermark()) {
-        p->SetAllocationWatermark(obj->address() + size_in_bytes);
-      }
-
-      return obj;
+      return HeapObject::cast(result);
     }
   }
@@ -2130,7 +2123,6 @@
 void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
-  current_page->SetAllocationWatermark(allocation_info_.top);
   int free_size =
       static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
   if (free_size > 0) {
@@ -2141,7 +2133,6 @@
 void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
-  current_page->SetAllocationWatermark(allocation_info_.top);
   int free_size =
       static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
   // In the fixed space free list all the free list items have the right size.
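Because every item in the fixed space free list has the same size, freeing and allocating reduce to pushing and popping a singly linked list threaded through the dead blocks themselves. A hedged sketch of that idea (field layout assumed, not taken from this patch):

// Sketch: the list head holds the address of the first free block; each
// free block's first word holds the address of the next one.
static void FixedSizeFreeSketch(Address* head, Address block) {
  Memory::Address_at(block) = *head;
  *head = block;
}

static Address FixedSizeAllocateSketch(Address* head) {
  Address result = *head;
  if (result != NULL) *head = Memory::Address_at(result);
  return result;  // NULL means the list is empty.
}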
@@ -2161,7 +2152,6 @@
 HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
                                          int size_in_bytes) {
   ASSERT(current_page->next_page()->is_valid());
-  current_page->next_page()->InvalidateWatermark(true);
   PutRestOfCurrentPageOnFreeList(current_page);
   SetAllocationInfo(&allocation_info_, current_page->next_page());
   return AllocateLinearly(&allocation_info_, size_in_bytes);
@@ -2306,12 +2296,160 @@
   PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
          Capacity(), Waste(), Available(), pct);
+  // Report remembered set statistics.
+  int rset_marked_pointers = 0;
+  int rset_marked_arrays = 0;
+  int rset_marked_array_elements = 0;
+  int cross_gen_pointers = 0;
+  int cross_gen_array_elements = 0;
+
+  PageIterator page_it(this, PageIterator::PAGES_IN_USE);
+  while (page_it.has_next()) {
+    Page* p = page_it.next();
+
+    for (Address rset_addr = p->RSetStart();
+         rset_addr < p->RSetEnd();
+         rset_addr += kIntSize) {
+      int rset = Memory::int_at(rset_addr);
+      if (rset != 0) {
+        // Bits were set.
+        int intoff =
+            static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
+        int bitoff = 0;
+        for (; bitoff < kBitsPerInt; ++bitoff) {
+          if ((rset & (1 << bitoff)) != 0) {
+            int bitpos = intoff*kBitsPerByte + bitoff;
+            Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
+            Object** obj = reinterpret_cast<Object**>(slot);
+            if (*obj == Heap::raw_unchecked_fixed_array_map()) {
+              rset_marked_arrays++;
+              FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));
+
+              rset_marked_array_elements += fa->length();
+              // Manually inline FixedArray::IterateBody.
+              Address elm_start = slot + FixedArray::kHeaderSize;
+              Address elm_stop = elm_start + fa->length() * kPointerSize;
+              for (Address elm_addr = elm_start;
+                   elm_addr < elm_stop; elm_addr += kPointerSize) {
+                // Filter non-heap-object pointers.
+                Object** elm_p = reinterpret_cast<Object**>(elm_addr);
+                if (Heap::InNewSpace(*elm_p))
+                  cross_gen_array_elements++;
+              }
+            } else {
+              rset_marked_pointers++;
+              if (Heap::InNewSpace(*obj))
+                cross_gen_pointers++;
+            }
+          }
+        }
+      }
+    }
+  }
+
+  pct = rset_marked_pointers == 0 ?
+        0 : cross_gen_pointers * 100 / rset_marked_pointers;
+  PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
+         rset_marked_pointers, cross_gen_pointers, pct);
+  PrintF(" rset-marked arrays %d, ", rset_marked_arrays);
+  PrintF(" elements %d, ", rset_marked_array_elements);
+  pct = rset_marked_array_elements == 0 ? 0
+        : cross_gen_array_elements * 100 / rset_marked_array_elements;
+  PrintF(" pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct);
+  PrintF(" total rset-marked bits %d\n",
+         (rset_marked_pointers + rset_marked_arrays));
+  pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0
+        : (cross_gen_pointers + cross_gen_array_elements) * 100 /
+          (rset_marked_pointers + rset_marked_array_elements);
+  PrintF(" total rset pointers %d, true cross generation ones %d (%%%d)\n",
+         (rset_marked_pointers + rset_marked_array_elements),
+         (cross_gen_pointers + cross_gen_array_elements),
+         pct);
+
   ClearHistograms();
   HeapObjectIterator obj_it(this);
   for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
     CollectHistogramInfo(obj);
   ReportHistogram(true);
 }
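The statistics loop above turns a remembered-set bit position back into a slot address with OffsetToAddress(bitpos << kObjectAlignmentBits). The inverse mapping, under the same assumptions (bit N guards the pointer-aligned word at page offset N * kPointerSize; rset words are 32-bit ints starting at Page::kRSetOffset, little-endian), would look like this sketch:

// Sketch: set the remembered-set bit covering 'slot'.
static void SetRSetBitSketch(Address slot) {
  Page* page = Page::FromAddress(slot);
  uint32_t bitpos =
      static_cast<uint32_t>(slot - page->address()) >> kObjectAlignmentBits;
  Address rset_word = page->address() + Page::kRSetOffset +
                      (bitpos / kBitsPerInt) * kIntSize;
  Memory::uint32_at(rset_word) |= (1u << (bitpos % kBitsPerInt));
}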
+
+
+// Dump the range of remembered set words between [start, end) corresponding
+// to the pointers starting at object_p. The allocation_top is an object
+// pointer which should not be read past. This is important for large object
+// pages, where some bits in the remembered set range do not correspond to
+// allocated addresses.
+static void PrintRSetRange(Address start, Address end, Object** object_p,
+                           Address allocation_top) {
+  Address rset_address = start;
+
+  // If the range starts on an odd-numbered word (e.g., for large object extra
+  // remembered set ranges), print some spaces.
+  if ((reinterpret_cast<uintptr_t>(start) / kIntSize) % 2 == 1) {
+    PrintF(" ");
+  }
+
+  // Loop over all the words in the range.
+  while (rset_address < end) {
+    uint32_t rset_word = Memory::uint32_at(rset_address);
+    int bit_position = 0;
+
+    // Loop over all the bits in the word.
+    while (bit_position < kBitsPerInt) {
+      if (object_p == reinterpret_cast<Object**>(allocation_top)) {
+        // Print a bar at the allocation pointer.
+        PrintF("|");
+      } else if (object_p > reinterpret_cast<Object**>(allocation_top)) {
+        // Do not dereference object_p past the allocation pointer.
+        PrintF("#");
+      } else if ((rset_word & (1 << bit_position)) == 0) {
+        // Print a dot for zero bits.
+        PrintF(".");
+      } else if (Heap::InNewSpace(*object_p)) {
+        // Print an X for one bits for pointers to new space.
+        PrintF("X");
+      } else {
+        // Print a circle for one bits for pointers to old space.
+        PrintF("o");
+      }
+
+      // Print a space after every 8th bit except the last.
+      if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) {
+        PrintF(" ");
+      }
+
+      // Advance to next bit.
+      bit_position++;
+      object_p++;
+    }
+
+    // Print a newline after every odd-numbered word, otherwise a space.
+    if ((reinterpret_cast<uintptr_t>(rset_address) / kIntSize) % 2 == 1) {
+      PrintF("\n");
+    } else {
+      PrintF(" ");
+    }
+
+    // Advance to next remembered set word.
+    rset_address += kIntSize;
+  }
+}
+
+
+void PagedSpace::DoPrintRSet(const char* space_name) {
+  PageIterator it(this, PageIterator::PAGES_IN_USE);
+  while (it.has_next()) {
+    Page* p = it.next();
+    PrintF("%s page 0x%x:\n", space_name, p);
+    PrintRSetRange(p->RSetStart(), p->RSetEnd(),
+                   reinterpret_cast<Object**>(p->ObjectAreaStart()),
+                   p->AllocationTop());
+    PrintF("\n");
+  }
+}
+
+
+void OldSpace::PrintRSet() { DoPrintRSet("old"); }
 #endif
 // -----------------------------------------------------------------------------
@@ -2361,7 +2499,6 @@
   if (it.has_next()) {
     accounting_stats_.WasteBytes(
         static_cast<int>(page->ObjectAreaEnd() - page_top));
-    page->SetAllocationWatermark(page_top);
   }
 }
@@ -2391,14 +2528,7 @@
   Object* result = free_list_.Allocate();
   if (!result->IsFailure()) {
     accounting_stats_.AllocateBytes(size_in_bytes);
-    HeapObject* obj = HeapObject::cast(result);
-    Page* p = Page::FromAddress(obj->address());
-
-    if (obj->address() >= p->AllocationWatermark()) {
-      p->SetAllocationWatermark(obj->address() + size_in_bytes);
-    }
-
-    return obj;
+    return HeapObject::cast(result);
   }
 }
@@ -2428,8 +2558,6 @@
   ASSERT(current_page->next_page()->is_valid());
   ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
   ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
-  current_page->next_page()->InvalidateWatermark(true);
-  current_page->SetAllocationWatermark(allocation_info_.top);
   accounting_stats_.WasteBytes(page_extra_);
   SetAllocationInfo(&allocation_info_, current_page->next_page());
   return AllocateLinearly(&allocation_info_, size_in_bytes);
@@ -2442,12 +2570,51 @@
   PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
          Capacity(), Waste(), Available(), pct);
+  // Report remembered set statistics.
+  int rset_marked_pointers = 0;
+  int cross_gen_pointers = 0;
+
+  PageIterator page_it(this, PageIterator::PAGES_IN_USE);
+  while (page_it.has_next()) {
+    Page* p = page_it.next();
+
+    for (Address rset_addr = p->RSetStart();
+         rset_addr < p->RSetEnd();
+         rset_addr += kIntSize) {
+      int rset = Memory::int_at(rset_addr);
+      if (rset != 0) {
+        // Bits were set.
+        int intoff =
+            static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
+        int bitoff = 0;
+        for (; bitoff < kBitsPerInt; ++bitoff) {
+          if ((rset & (1 << bitoff)) != 0) {
+            int bitpos = intoff*kBitsPerByte + bitoff;
+            Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
+            Object** obj = reinterpret_cast<Object**>(slot);
+            rset_marked_pointers++;
+            if (Heap::InNewSpace(*obj))
+              cross_gen_pointers++;
+          }
+        }
+      }
+    }
+  }
+
+  pct = rset_marked_pointers == 0 ?
+        0 : cross_gen_pointers * 100 / rset_marked_pointers;
+  PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
+         rset_marked_pointers, cross_gen_pointers, pct);
+
   ClearHistograms();
   HeapObjectIterator obj_it(this);
   for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
     CollectHistogramInfo(obj);
   ReportHistogram(false);
 }
+
+
+void FixedSpace::PrintRSet() { DoPrintRSet(name_); }
 #endif
@@ -2626,7 +2793,8 @@
   chunk->set_size(chunk_size);
   first_chunk_ = chunk;
-  // Initialize page header.
+  // Set the object address and size in the page header and clear its
+  // remembered set.
   Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
   Address object_address = page->ObjectAreaStart();
   // Clear the low order bit of the second word in the page to flag it as a
@@ -2634,7 +2802,13 @@
   // low order bit should already be clear.
   ASSERT((chunk_size & 0x1) == 0);
   page->SetIsLargeObjectPage(true);
-  page->SetRegionMarks(Page::kAllRegionsCleanMarks);
+  page->ClearRSet();
+  int extra_bytes = requested_size - object_size;
+  if (extra_bytes > 0) {
+    // The extra memory for the remembered set should be cleared.
+    memset(object_address + object_size, 0, extra_bytes);
+  }
+
   return HeapObject::FromAddress(object_address);
 }
@@ -2649,7 +2823,8 @@
 Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
   ASSERT(0 < size_in_bytes);
-  return AllocateRawInternal(size_in_bytes,
+  int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes);
+  return AllocateRawInternal(size_in_bytes + extra_rset_bytes,
                              size_in_bytes,
                              NOT_EXECUTABLE);
 }
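ExtraRSetBytesFor is declared elsewhere and not shown in this patch. From the way the extra remembered set is cleared after the object (above) and from the RoundUp arithmetic in LargeObjectSpace::PrintRSet below, a plausible reconstruction is: no extra bytes unless a fixed array extends past the normal object area, otherwise one bit per extra pointer slot, rounded up to whole 32-bit rset words.

// Sketch only; the real definition may differ in how it rounds.
static int ExtraRSetBytesForSketch(int object_size_in_bytes) {
  int extra_bytes = object_size_in_bytes - Page::kObjectAreaSize;
  if (extra_bytes <= 0) return 0;
  return RoundUp(extra_bytes / kPointerSize, kBitsPerInt) / kBitsPerByte;
}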
@@ -2676,62 +2851,60 @@
   return Failure::Exception();
 }
-void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
+
+void LargeObjectSpace::ClearRSet() {
+  ASSERT(Page::is_rset_in_use());
+
   LargeObjectIterator it(this);
   for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
     // We only have code, sequential strings, or fixed arrays in large
-    // object space, and only fixed arrays can possibly contain pointers to
-    // the young generation.
+    // object space, and only fixed arrays need remembered set support.
     if (object->IsFixedArray()) {
+      // Clear the normal remembered set region of the page.
       Page* page = Page::FromAddress(object->address());
-      uint32_t marks = page->GetRegionMarks();
-      uint32_t newmarks = Page::kAllRegionsCleanMarks;
+      page->ClearRSet();
-      if (marks != Page::kAllRegionsCleanMarks) {
-        // For a large page a single dirty mark corresponds to several
-        // regions (modulo 32). So we treat a large page as a sequence of
-        // normal pages of size Page::kPageSize having same dirty marks
-        // and subsequently iterate dirty regions on each of these pages.
-        Address start = object->address();
-        Address end = page->ObjectAreaEnd();
-        Address object_end = start + object->Size();
+      // Clear the extra remembered set.
+      int size = object->Size();
+      int extra_rset_bytes = ExtraRSetBytesFor(size);
+      memset(object->address() + size, 0, extra_rset_bytes);
+    }
+  }
+}
-        // Iterate regions of the first normal page covering object.
-        uint32_t first_region_number = page->GetRegionNumberForAddress(start);
-        newmarks |=
-            Heap::IterateDirtyRegions(marks >> first_region_number,
-                                      start,
-                                      end,
-                                      &Heap::IteratePointersInDirtyRegion,
-                                      copy_object) << first_region_number;
-        start = end;
-        end = start + Page::kPageSize;
-        while (end <= object_end) {
-          // Iterate next 32 regions.
-          newmarks |=
-              Heap::IterateDirtyRegions(marks,
-                                        start,
-                                        end,
-                                        &Heap::IteratePointersInDirtyRegion,
-                                        copy_object);
-          start = end;
-          end = start + Page::kPageSize;
-        }
+void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
+  ASSERT(Page::is_rset_in_use());
-        if (start != object_end) {
-          // Iterate the last piece of an object which is less than
-          // Page::kPageSize.
-          newmarks |=
-              Heap::IterateDirtyRegions(marks,
-                                        start,
-                                        object_end,
-                                        &Heap::IteratePointersInDirtyRegion,
-                                        copy_object);
-        }
+  static void* lo_rset_histogram = StatsTable::CreateHistogram(
+      "V8.RSetLO",
+      0,
+      // Keeping this histogram's buckets the same as the paged space histogram.
+      Page::kObjectAreaSize / kPointerSize,
+      30);
-        page->SetRegionMarks(newmarks);
+  LargeObjectIterator it(this);
+  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
+    // We only have code, sequential strings, or fixed arrays in large
+    // object space, and only fixed arrays can possibly contain pointers to
+    // the young generation.
+    if (object->IsFixedArray()) {
+      // Iterate the normal page remembered set range.
+      Page* page = Page::FromAddress(object->address());
+      Address object_end = object->address() + object->Size();
+      int count = Heap::IterateRSetRange(page->ObjectAreaStart(),
+                                         Min(page->ObjectAreaEnd(), object_end),
+                                         page->RSetStart(),
+                                         copy_object_func);
+
+      // Iterate the extra array elements.
+      if (object_end > page->ObjectAreaEnd()) {
+        count += Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
+                                        object_end, copy_object_func);
       }
+      if (lo_rset_histogram != NULL) {
+        StatsTable::AddHistogramSample(lo_rset_histogram, count);
+      }
     }
   }
 }
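Heap::IterateRSetRange lives in heap.cc and is not part of this patch. The contract assumed by the caller above is: walk the rset words covering [object_start, object_end), invoke the callback on every slot whose bit is set, drop bits whose slots no longer point into new space, and return the number of set bits seen (for the histogram). A hedged sketch:

static int IterateRSetRangeSketch(Address object_start, Address object_end,
                                  Address rset_start,
                                  ObjectSlotCallback copy_object_func) {
  int set_bits = 0;
  Address rset_addr = rset_start;
  Address slot = object_start;
  while (slot < object_end) {
    uint32_t bits = Memory::uint32_at(rset_addr);
    uint32_t surviving = 0;
    for (int b = 0; b < kBitsPerInt && slot < object_end;
         b++, slot += kPointerSize) {
      if ((bits & (1u << b)) == 0) continue;
      set_bits++;
      Object** p = reinterpret_cast<Object**>(slot);
      if (Heap::InNewSpace(*p)) {
        copy_object_func(reinterpret_cast<HeapObject**>(p));
      }
      // Keep the bit only if the (possibly updated) slot still points
      // into new space.
      if (Heap::InNewSpace(*p)) surviving |= (1u << b);
    }
    Memory::uint32_at(rset_addr) = surviving;
    rset_addr += kIntSize;
  }
  return set_bits;
}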
@@ -2822,7 +2995,7 @@
   } else if (object->IsFixedArray()) {
     // We loop over fixed arrays ourselves, rather then using the visitor,
     // because the visitor doesn't support the start/offset iteration
-    // needed for IsRegionDirty.
+    // needed for IsRSetSet.
     FixedArray* array = FixedArray::cast(object);
     for (int j = 0; j < array->length(); j++) {
       Object* element = array->get(j);
@@ -2831,11 +3004,8 @@
       ASSERT(Heap::Contains(element_object));
       ASSERT(element_object->map()->IsMap());
       if (Heap::InNewSpace(element_object)) {
-        Address array_addr = object->address();
-        Address element_addr = array_addr + FixedArray::kHeaderSize +
-            j * kPointerSize;
-
-        ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
+        ASSERT(Page::IsRSetSet(object->address(),
+                               FixedArray::kHeaderSize + j * kPointerSize));
       }
     }
   }
@@ -2876,6 +3046,33 @@
   }
 }
 }
+
+
+void LargeObjectSpace::PrintRSet() {
+  LargeObjectIterator it(this);
+  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
+    if (object->IsFixedArray()) {
+      Page* page = Page::FromAddress(object->address());
+
+      Address allocation_top = object->address() + object->Size();
+      PrintF("large page 0x%x:\n", page);
+      PrintRSetRange(page->RSetStart(), page->RSetEnd(),
+                     reinterpret_cast<Object**>(object->address()),
+                     allocation_top);
+      int extra_array_bytes = object->Size() - Page::kObjectAreaSize;
+      int extra_rset_bits = RoundUp(extra_array_bytes / kPointerSize,
+                                    kBitsPerInt);
+      PrintF("------------------------------------------------------------"
+             "-----------\n");
+      PrintRSetRange(allocation_top,
+                     allocation_top + extra_rset_bits / kBitsPerByte,
+                     reinterpret_cast<Object**>(object->address()
+                                                + Page::kObjectAreaSize),
+                     allocation_top);
+      PrintF("\n");
+    }
+  }
+}
 #endif // DEBUG
} } // namespace v8::internal |