Index: src/heap.cc |
=================================================================== |
--- src/heap.cc (revision 4686) |
+++ src/heap.cc (working copy) |
@@ -326,6 +326,13 @@ |
} |
if (FLAG_gc_verbose) Print(); |
+ |
+ if (FLAG_print_rset) { |
+ // Not all spaces have remembered set bits that we care about. |
+ old_pointer_space_->PrintRSet(); |
+ map_space_->PrintRSet(); |
+ lo_space_->PrintRSet(); |
+ } |
#endif |
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
@@ -512,8 +519,9 @@ |
Heap::CollectGarbage(cell_space_size, CELL_SPACE); |
gc_performed = true; |
} |
- // We add a slack-factor of 2 in order to have space for a series of |
- // large-object allocations that are only just larger than the page size. |
+ // We add a slack-factor of 2 in order to have space for the remembered |
+ // set and a series of large-object allocations that are only just larger |
+ // than the page size. |
large_object_size *= 2; |
// The ReserveSpace method on the large object space checks how much |
// we can expand the old generation. This includes expansion caused by |
@@ -564,25 +572,6 @@ |
} |
-#ifdef DEBUG |
- |
-enum PageWatermarkValidity { |
- ALL_VALID, |
- ALL_INVALID |
-}; |
- |
-static void VerifyPageWatermarkValidity(PagedSpace* space, |
- PageWatermarkValidity validity) { |
- PageIterator it(space, PageIterator::PAGES_IN_USE); |
- bool expected_value = (validity == ALL_VALID); |
- while (it.has_next()) { |
- Page* page = it.next(); |
- ASSERT(page->IsWatermarkValid() == expected_value); |
- } |
-} |
-#endif |
- |
- |
void Heap::PerformGarbageCollection(AllocationSpace space, |
GarbageCollector collector, |
GCTracer* tracer) { |
@@ -827,20 +816,6 @@ |
gc_state_ = SCAVENGE; |
- Page::FlipMeaningOfInvalidatedWatermarkFlag(); |
-#ifdef DEBUG |
- VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID); |
- VerifyPageWatermarkValidity(map_space_, ALL_VALID); |
-#endif |
- |
- // We do not update an allocation watermark of the top page during linear |
- // allocation to avoid overhead. So to maintain the watermark invariant |
- // we have to manually cache the watermark and mark the top page as having an |
- // invalid watermark. This guarantees that dirty regions iteration will use a |
- // correct watermark even if a linear allocation happens. |
- old_pointer_space_->FlushTopPageWatermark(); |
- map_space_->FlushTopPageWatermark(); |
- |
// Implements Cheney's copying algorithm |
LOG(ResourceEvent("scavenge", "begin")); |
@@ -883,18 +858,10 @@ |
// Copy objects reachable from the old generation. By definition, |
// there are no intergenerational pointers in code or data spaces. |
- IterateDirtyRegions(old_pointer_space_, |
- &IteratePointersInDirtyRegion, |
- &ScavengePointer, |
- WATERMARK_CAN_BE_INVALID); |
+ IterateRSet(old_pointer_space_, &ScavengePointer); |
+ IterateRSet(map_space_, &ScavengePointer); |
+ lo_space_->IterateRSet(&ScavengePointer); |
- IterateDirtyRegions(map_space_, |
- &IteratePointersInDirtyMapsRegion, |
- &ScavengePointer, |
- WATERMARK_CAN_BE_INVALID); |
- |
- lo_space_->IterateDirtyRegions(&ScavengePointer); |
- |
// Copy objects reachable from cells by scavenging cell values directly. |
HeapObjectIterator cell_iterator(cell_space_); |
for (HeapObject* cell = cell_iterator.next(); |
@@ -996,8 +963,9 @@ |
// Copy the from-space object to its new location (given by the |
// forwarding address) and fix its map. |
HeapObject* target = source->map_word().ToForwardingAddress(); |
- int size = source->SizeFromMap(map); |
- CopyBlock(target->address(), source->address(), size); |
+ CopyBlock(reinterpret_cast<Object**>(target->address()), |
+ reinterpret_cast<Object**>(source->address()), |
+ source->SizeFromMap(map)); |
target->set_map(map); |
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
@@ -1005,10 +973,8 @@ |
RecordCopiedObject(target); |
#endif |
// Visit the newly copied object for pointers to new space. |
- ASSERT(!target->IsMap()); |
- IterateAndMarkPointersToNewSpace(target->address(), |
- target->address() + size, |
- &ScavengePointer); |
+ target->Iterate(scavenge_visitor); |
+ UpdateRSet(target); |
} |
// Take another spin if there are now unswept objects in new space |
@@ -1019,6 +985,117 @@ |
} |
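+// Clear the remembered set bits that cover the address range
+// [start, start + size_in_bytes).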
+void Heap::ClearRSetRange(Address start, int size_in_bytes) { |
+ uint32_t start_bit; |
+ Address start_word_address = |
+ Page::ComputeRSetBitPosition(start, 0, &start_bit); |
+ uint32_t end_bit; |
+ Address end_word_address = |
+ Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize, |
+ 0, |
+ &end_bit); |
+ |
+ // We want to clear the bits in the starting word from the first bit
+ // to the end of the word, and in the ending word from the beginning of
+ // the word up to and including the last bit. Build a pair of bitmasks
+ // to do that.
+ uint32_t start_bitmask = start_bit - 1; |
+ uint32_t end_bitmask = ~((end_bit << 1) - 1); |
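+ // A sketch of the mask arithmetic, assuming start_bit and end_bit are
+ // single-bit masks: with start_bit == 1 << 3 and end_bit == 1 << 5,
+ // start_bitmask == 0x00000007 keeps only bits 0..2 and end_bitmask ==
+ // 0xffffffc0 keeps only bits 6..31, so bits 3..5 are the ones cleared.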
+ |
+ // If the starting and ending word addresses are the same, we mask that
+ // word once; otherwise we mask the starting and ending words separately
+ // and clear every word in between.
+ if (start_word_address == end_word_address) { |
+ Memory::uint32_at(start_word_address) &= (start_bitmask | end_bitmask); |
+ } else { |
+ Memory::uint32_at(start_word_address) &= start_bitmask; |
+ Memory::uint32_at(end_word_address) &= end_bitmask; |
+ start_word_address += kIntSize; |
+ memset(start_word_address, 0, end_word_address - start_word_address); |
+ } |
+} |
+ |
+ |
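+// Visitor that sets the remembered set bit for every visited slot that
+// holds a pointer into new space.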
+class UpdateRSetVisitor: public ObjectVisitor { |
+ public: |
+ |
+ void VisitPointer(Object** p) { |
+ UpdateRSet(p); |
+ } |
+ |
+ void VisitPointers(Object** start, Object** end) { |
+ // Update the remembered set for stores into the slots [start, end).
+ // Used (a) to update the remembered set when promoting a young object
+ // to old space and (b) to rebuild remembered sets after a mark-compact
+ // collection.
+ for (Object** p = start; p < end; p++) UpdateRSet(p); |
+ } |
+ private: |
+ |
+ void UpdateRSet(Object** p) { |
+ // The remembered set bit should not already be set: it is clear for
+ // objects newly copied to old space, and it is cleared before rebuilding
+ // in the mark-compact collector.
+ ASSERT(!Page::IsRSetSet(reinterpret_cast<Address>(p), 0)); |
+ if (Heap::InNewSpace(*p)) { |
+ Page::SetRSet(reinterpret_cast<Address>(p), 0); |
+ } |
+ } |
+}; |
+ |
+ |
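+// Set the remembered set bit for every slot in 'obj' that holds a pointer
+// into new space, and return the object's size in bytes.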
+int Heap::UpdateRSet(HeapObject* obj) { |
+ ASSERT(!InNewSpace(obj)); |
+ // Fixed arrays are handled specially: iterate the body using the array's
+ // start address and an explicit offset, because on large object pages
+ // Page::SetRSet needs the start of the object, which the slot addresses
+ // seen by UpdateRSetVisitor do not provide.
+ if (obj->IsFixedArray()) { |
+ FixedArray* array = FixedArray::cast(obj); |
+ int length = array->length(); |
+ for (int i = 0; i < length; i++) { |
+ int offset = FixedArray::kHeaderSize + i * kPointerSize; |
+ ASSERT(!Page::IsRSetSet(obj->address(), offset)); |
+ if (Heap::InNewSpace(array->get(i))) { |
+ Page::SetRSet(obj->address(), offset); |
+ } |
+ } |
+ } else if (!obj->IsCode()) { |
+ // Skip code objects; we know they do not contain intergenerational
+ // pointers.
+ UpdateRSetVisitor v; |
+ obj->Iterate(&v); |
+ } |
+ return obj->Size(); |
+} |
+ |
+ |
+void Heap::RebuildRSets() { |
+ // By definition, we do not care about remembered set bits in code, |
+ // data, or cell spaces. |
+ map_space_->ClearRSet(); |
+ RebuildRSets(map_space_); |
+ |
+ old_pointer_space_->ClearRSet(); |
+ RebuildRSets(old_pointer_space_); |
+ |
+ Heap::lo_space_->ClearRSet(); |
+ RebuildRSets(lo_space_); |
+} |
+ |
+ |
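+// Rebuild the remembered set bits for every object in a paged space.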
+void Heap::RebuildRSets(PagedSpace* space) { |
+ HeapObjectIterator it(space); |
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) |
+ Heap::UpdateRSet(obj); |
+} |
+ |
+ |
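+// Rebuild the remembered set bits for every object in the large object
+// space.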
+void Heap::RebuildRSets(LargeObjectSpace* space) { |
+ LargeObjectIterator it(space); |
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) |
+ Heap::UpdateRSet(obj); |
+} |
+ |
+ |
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
void Heap::RecordCopiedObject(HeapObject* obj) { |
bool should_record = false; |
@@ -1044,7 +1121,9 @@ |
HeapObject* target, |
int size) { |
// Copy the content of source to target. |
- CopyBlock(target->address(), source->address(), size); |
+ CopyBlock(reinterpret_cast<Object**>(target->address()), |
+ reinterpret_cast<Object**>(source->address()), |
+ size); |
// Set the forwarding address. |
source->set_map_word(MapWord::FromForwardingAddress(target)); |
@@ -1603,7 +1682,7 @@ |
// loop above because it needs to be allocated manually with the special |
// hash code in place. The hash code for the hidden_symbol is zero to ensure |
// that it will always be at the first entry in property descriptors. |
- obj = AllocateSymbol(CStrVector(""), 0, String::kZeroHash); |
+ obj = AllocateSymbol(CStrVector(""), 0, String::kHashComputedMask); |
if (obj->IsFailure()) return false; |
hidden_symbol_ = String::cast(obj); |
@@ -1839,9 +1918,6 @@ |
share->set_compiler_hints(0); |
share->set_this_property_assignments_count(0); |
share->set_this_property_assignments(undefined_value()); |
- share->set_num_literals(0); |
- share->set_end_position(0); |
- share->set_function_token_position(0); |
return result; |
} |
@@ -2103,8 +2179,8 @@ |
: lo_space_->AllocateRaw(size); |
if (result->IsFailure()) return result; |
- reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map()); |
- reinterpret_cast<ByteArray*>(result)->set_length(length); |
+ reinterpret_cast<Array*>(result)->set_map(byte_array_map()); |
+ reinterpret_cast<Array*>(result)->set_length(length); |
return result; |
} |
@@ -2119,8 +2195,8 @@ |
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE); |
if (result->IsFailure()) return result; |
- reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map()); |
- reinterpret_cast<ByteArray*>(result)->set_length(length); |
+ reinterpret_cast<Array*>(result)->set_map(byte_array_map()); |
+ reinterpret_cast<Array*>(result)->set_length(length); |
return result; |
} |
@@ -2236,7 +2312,9 @@ |
// Copy code object. |
Address old_addr = code->address(); |
Address new_addr = reinterpret_cast<HeapObject*>(result)->address(); |
- CopyBlock(new_addr, old_addr, obj_size); |
+ CopyBlock(reinterpret_cast<Object**>(new_addr), |
+ reinterpret_cast<Object**>(old_addr), |
+ obj_size); |
// Relocate the copy. |
Code* new_code = Code::cast(result); |
ASSERT(!CodeRange::exists() || CodeRange::contains(code->address())); |
@@ -2382,8 +2460,8 @@ |
// Copy the content. The arguments boilerplate doesn't have any |
// fields that point to new space so it's safe to skip the write |
// barrier here. |
- CopyBlock(HeapObject::cast(result)->address(), |
- boilerplate->address(), |
+ CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()), |
+ reinterpret_cast<Object**>(boilerplate->address()), |
kArgumentsObjectSize); |
// Set the two properties. |
@@ -2605,8 +2683,8 @@ |
clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE); |
if (clone->IsFailure()) return clone; |
Address clone_address = HeapObject::cast(clone)->address(); |
- CopyBlock(clone_address, |
- source->address(), |
+ CopyBlock(reinterpret_cast<Object**>(clone_address), |
+ reinterpret_cast<Object**>(source->address()), |
object_size); |
// Update write barrier for all fields that lie beyond the header. |
RecordWrites(clone_address, |
@@ -2618,8 +2696,8 @@ |
ASSERT(Heap::InNewSpace(clone)); |
// Since we know the clone is allocated in new space, we can copy |
// the contents without worrying about updating the write barrier. |
- CopyBlock(HeapObject::cast(clone)->address(), |
- source->address(), |
+ CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()), |
+ reinterpret_cast<Object**>(source->address()), |
object_size); |
} |
@@ -2890,8 +2968,8 @@ |
Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE); |
if (result->IsFailure()) return result; |
// Initialize the object. |
- reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map()); |
- reinterpret_cast<FixedArray*>(result)->set_length(0); |
+ reinterpret_cast<Array*>(result)->set_map(fixed_array_map()); |
+ reinterpret_cast<Array*>(result)->set_length(0); |
return result; |
} |
@@ -2916,7 +2994,9 @@ |
if (obj->IsFailure()) return obj; |
if (Heap::InNewSpace(obj)) { |
HeapObject* dst = HeapObject::cast(obj); |
- CopyBlock(dst->address(), src->address(), FixedArray::SizeFor(len)); |
+ CopyBlock(reinterpret_cast<Object**>(dst->address()), |
+ reinterpret_cast<Object**>(src->address()), |
+ FixedArray::SizeFor(len)); |
return obj; |
} |
HeapObject::cast(obj)->set_map(src->map()); |
@@ -2937,8 +3017,8 @@ |
Object* result = AllocateRawFixedArray(length); |
if (!result->IsFailure()) { |
// Initialize header. |
- FixedArray* array = reinterpret_cast<FixedArray*>(result); |
- array->set_map(fixed_array_map()); |
+ reinterpret_cast<Array*>(result)->set_map(fixed_array_map()); |
+ FixedArray* array = FixedArray::cast(result); |
array->set_length(length); |
// Initialize body. |
ASSERT(!Heap::InNewSpace(undefined_value())); |
@@ -2965,10 +3045,27 @@ |
space = LO_SPACE; |
} |
- AllocationSpace retry_space = |
- (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE; |
- |
- return AllocateRaw(size, space, retry_space); |
+ // Specialize allocation for the space. |
+ Object* result = Failure::OutOfMemoryException(); |
+ if (space == NEW_SPACE) { |
+ // We cannot use Heap::AllocateRaw() because it will not properly |
+ // allocate extra remembered set bits if always_allocate() is true and |
+ // new space allocation fails. |
+ result = new_space_.AllocateRaw(size); |
+ if (result->IsFailure() && always_allocate()) { |
+ if (size <= MaxObjectSizeInPagedSpace()) { |
+ result = old_pointer_space_->AllocateRaw(size); |
+ } else { |
+ result = lo_space_->AllocateRawFixedArray(size); |
+ } |
+ } |
+ } else if (space == OLD_POINTER_SPACE) { |
+ result = old_pointer_space_->AllocateRaw(size); |
+ } else { |
+ ASSERT(space == LO_SPACE); |
+ result = lo_space_->AllocateRawFixedArray(size); |
+ } |
+ return result; |
} |
@@ -3016,7 +3113,7 @@ |
Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) { |
Object* result = Heap::AllocateFixedArray(length, pretenure); |
if (result->IsFailure()) return result; |
- reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map()); |
+ reinterpret_cast<Array*>(result)->set_map(hash_table_map()); |
ASSERT(result->IsHashTable()); |
return result; |
} |
@@ -3268,30 +3365,6 @@ |
#ifdef DEBUG |
- |
- |
-static bool VerifyPointersInDirtyRegion(Address start, |
- Address end, |
- ObjectSlotCallback copy_object_func) {
- Address slot_address = start; |
- |
- while (slot_address < end) { |
- Object** slot = reinterpret_cast<Object**>(slot_address); |
- if (Heap::InNewSpace(*slot)) { |
- ASSERT(Heap::InToSpace(*slot)); |
- } |
- slot_address += kPointerSize; |
- } |
- |
- return true; |
-} |
- |
- |
-static void DummyScavengePointer(HeapObject** p) { |
-} |
- |
- |
void Heap::Verify() { |
ASSERT(HasBeenSetup()); |
@@ -3300,29 +3373,15 @@ |
new_space_.Verify(); |
- VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor; |
- old_pointer_space_->Verify(&dirty_regions_visitor); |
- map_space_->Verify(&dirty_regions_visitor); |
+ VerifyPointersAndRSetVisitor rset_visitor; |
+ old_pointer_space_->Verify(&rset_visitor); |
+ map_space_->Verify(&rset_visitor); |
- IterateDirtyRegions(old_pointer_space_, |
- &VerifyPointersInDirtyRegion, |
- &DummyScavengePointer, |
- WATERMARK_SHOULD_BE_VALID); |
+ VerifyPointersVisitor no_rset_visitor; |
+ old_data_space_->Verify(&no_rset_visitor); |
+ code_space_->Verify(&no_rset_visitor); |
+ cell_space_->Verify(&no_rset_visitor); |
- IterateDirtyRegions(map_space_, |
- &VerifyPointersInDirtyRegion, |
- &DummyScavengePointer, |
- WATERMARK_SHOULD_BE_VALID); |
- |
- |
- VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID); |
- VerifyPageWatermarkValidity(map_space_, ALL_INVALID); |
- |
- VerifyPointersVisitor no_dirty_regions_visitor; |
- old_data_space_->Verify(&no_dirty_regions_visitor); |
- code_space_->Verify(&no_dirty_regions_visitor); |
- cell_space_->Verify(&no_dirty_regions_visitor); |
- |
lo_space_->Verify(); |
} |
#endif // DEBUG |
@@ -3374,253 +3433,65 @@ |
#endif // DEBUG |
-bool Heap::IteratePointersInDirtyRegion(Address start, |
- Address end, |
- ObjectSlotCallback copy_object_func) { |
- Address slot_address = start; |
- bool pointers_to_new_space_found = false; |
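+// Iterate the remembered set bits that cover the pointers in
+// [object_start, object_end), reading remembered set words starting at
+// rset_start.  For every recorded slot that still points into new space,
+// call copy_object_func; bits whose slots no longer point into new space
+// are cleared.  Returns the number of set bits that were processed.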
+int Heap::IterateRSetRange(Address object_start, |
+ Address object_end, |
+ Address rset_start, |
+ ObjectSlotCallback copy_object_func) { |
+ Address object_address = object_start; |
+ Address rset_address = rset_start; |
+ int set_bits_count = 0; |
- while (slot_address < end) { |
- Object** slot = reinterpret_cast<Object**>(slot_address); |
- if (Heap::InNewSpace(*slot)) { |
- ASSERT((*slot)->IsHeapObject()); |
- copy_object_func(reinterpret_cast<HeapObject**>(slot)); |
- if (Heap::InNewSpace(*slot)) { |
- ASSERT((*slot)->IsHeapObject()); |
- pointers_to_new_space_found = true; |
+ // Loop over all the pointers in [object_start, object_end). |
+ while (object_address < object_end) { |
+ uint32_t rset_word = Memory::uint32_at(rset_address); |
+ if (rset_word != 0) { |
+ uint32_t result_rset = rset_word; |
+ for (uint32_t bitmask = 1; bitmask != 0; bitmask = bitmask << 1) { |
+ // Do not dereference pointers at or past object_end. |
+ if ((rset_word & bitmask) != 0 && object_address < object_end) { |
+ Object** object_p = reinterpret_cast<Object**>(object_address); |
+ if (Heap::InNewSpace(*object_p)) { |
+ copy_object_func(reinterpret_cast<HeapObject**>(object_p)); |
+ } |
+ // If this pointer does not need to be remembered anymore, clear |
+ // the remembered set bit. |
+ if (!Heap::InNewSpace(*object_p)) result_rset &= ~bitmask; |
+ set_bits_count++; |
+ } |
+ object_address += kPointerSize; |
} |
- } |
- slot_address += kPointerSize; |
- } |
- return pointers_to_new_space_found; |
-} |
- |
- |
-// Compute start address of the first map following given addr. |
-static inline Address MapStartAlign(Address addr) { |
- Address page = Page::FromAddress(addr)->ObjectAreaStart(); |
- return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize); |
-} |
- |
- |
-// Compute end address of the first map preceding given addr. |
-static inline Address MapEndAlign(Address addr) { |
- Address page = Page::FromAllocationTop(addr)->ObjectAreaStart(); |
- return page + ((addr - page) / Map::kSize * Map::kSize); |
-} |
- |
- |
-static bool IteratePointersInDirtyMaps(Address start, |
- Address end, |
- ObjectSlotCallback copy_object_func) { |
- ASSERT(MapStartAlign(start) == start); |
- ASSERT(MapEndAlign(end) == end); |
- |
- Address map_address = start; |
- bool pointers_to_new_space_found = false; |
- |
- while (map_address < end) { |
- ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address))); |
- ASSERT(Memory::Object_at(map_address)->IsMap()); |
- |
- Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset; |
- Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset; |
- |
- if (Heap::IteratePointersInDirtyRegion(pointer_fields_start, |
- pointer_fields_end, |
- copy_object_func)) { |
- pointers_to_new_space_found = true; |
- } |
- |
- map_address += Map::kSize; |
- } |
- |
- return pointers_to_new_space_found; |
-} |
- |
- |
-bool Heap::IteratePointersInDirtyMapsRegion( |
- Address start, |
- Address end, |
- ObjectSlotCallback copy_object_func) { |
- Address map_aligned_start = MapStartAlign(start); |
- Address map_aligned_end = MapEndAlign(end); |
- |
- bool contains_pointers_to_new_space = false; |
- |
- if (map_aligned_start != start) { |
- Address prev_map = map_aligned_start - Map::kSize; |
- ASSERT(Memory::Object_at(prev_map)->IsMap()); |
- |
- Address pointer_fields_start = |
- Max(start, prev_map + Map::kPointerFieldsBeginOffset); |
- |
- Address pointer_fields_end = |
- Min(prev_map + Map::kCodeCacheOffset + kPointerSize, end); |
- |
- contains_pointers_to_new_space = |
- IteratePointersInDirtyRegion(pointer_fields_start, |
- pointer_fields_end, |
- copy_object_func) |
- || contains_pointers_to_new_space; |
- } |
- |
- contains_pointers_to_new_space = |
- IteratePointersInDirtyMaps(map_aligned_start, |
- map_aligned_end, |
- copy_object_func) |
- || contains_pointers_to_new_space; |
- |
- if (map_aligned_end != end) { |
- ASSERT(Memory::Object_at(map_aligned_end)->IsMap()); |
- |
- Address pointer_fields_start = map_aligned_end + Map::kPrototypeOffset; |
- |
- Address pointer_fields_end = |
- Min(end, map_aligned_end + Map::kCodeCacheOffset + kPointerSize); |
- |
- contains_pointers_to_new_space = |
- IteratePointersInDirtyRegion(pointer_fields_start, |
- pointer_fields_end, |
- copy_object_func) |
- || contains_pointers_to_new_space; |
- } |
- |
- return contains_pointers_to_new_space; |
-} |
- |
- |
-void Heap::IterateAndMarkPointersToNewSpace(Address start, |
- Address end, |
- ObjectSlotCallback callback) { |
- Address slot_address = start; |
- Page* page = Page::FromAddress(start); |
- |
- uint32_t marks = page->GetRegionMarks(); |
- |
- while (slot_address < end) { |
- Object** slot = reinterpret_cast<Object**>(slot_address); |
- if (Heap::InNewSpace(*slot)) { |
- ASSERT((*slot)->IsHeapObject()); |
- callback(reinterpret_cast<HeapObject**>(slot)); |
- if (Heap::InNewSpace(*slot)) { |
- ASSERT((*slot)->IsHeapObject()); |
- marks |= page->GetRegionMaskForAddress(slot_address); |
+ // Update the remembered set if it has changed. |
+ if (result_rset != rset_word) { |
+ Memory::uint32_at(rset_address) = result_rset; |
} |
+ } else { |
+ // No bits in the word were set. This is the common case. |
+ object_address += kPointerSize * kBitsPerInt; |
} |
- slot_address += kPointerSize; |
+ rset_address += kIntSize; |
} |
- |
- page->SetRegionMarks(marks); |
+ return set_bits_count; |
} |
-uint32_t Heap::IterateDirtyRegions( |
- uint32_t marks, |
- Address area_start, |
- Address area_end, |
- DirtyRegionCallback visit_dirty_region, |
- ObjectSlotCallback copy_object_func) { |
- uint32_t newmarks = 0; |
- uint32_t mask = 1; |
+void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) { |
+ ASSERT(Page::is_rset_in_use()); |
+ ASSERT(space == old_pointer_space_ || space == map_space_); |
- if (area_start >= area_end) { |
- return newmarks; |
- } |
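+ // Histogram of the number of set remembered set bits found per page,
+ // created lazily on first use.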
+ static void* paged_rset_histogram = StatsTable::CreateHistogram( |
+ "V8.RSetPaged", |
+ 0, |
+ Page::kObjectAreaSize / kPointerSize, |
+ 30); |
- Address region_start = area_start; |
- |
- // area_start does not necessarily coincide with start of the first region. |
- // Thus to calculate the beginning of the next region we have to align |
- // area_start by Page::kRegionSize. |
- Address second_region = |
- reinterpret_cast<Address>( |
- reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) & |
- ~Page::kRegionAlignmentMask); |
- |
- // Next region might be beyond area_end. |
- Address region_end = Min(second_region, area_end); |
- |
- if (marks & mask) { |
- if (visit_dirty_region(region_start, region_end, copy_object_func)) { |
- newmarks |= mask; |
- } |
- } |
- mask <<= 1; |
- |
- // Iterate subsequent regions which fully lay inside [area_start, area_end[. |
- region_start = region_end; |
- region_end = region_start + Page::kRegionSize; |
- |
- while (region_end <= area_end) { |
- if (marks & mask) { |
- if (visit_dirty_region(region_start, region_end, copy_object_func)) { |
- newmarks |= mask; |
- } |
- } |
- |
- region_start = region_end; |
- region_end = region_start + Page::kRegionSize; |
- |
- mask <<= 1; |
- } |
- |
- if (region_start != area_end) { |
- // A small piece of area left uniterated because area_end does not coincide |
- // with region end. Check whether region covering last part of area is |
- // dirty. |
- if (marks & mask) { |
- if (visit_dirty_region(region_start, area_end, copy_object_func)) { |
- newmarks |= mask; |
- } |
- } |
- } |
- |
- return newmarks; |
-} |
- |
- |
- |
-void Heap::IterateDirtyRegions( |
- PagedSpace* space, |
- DirtyRegionCallback visit_dirty_region, |
- ObjectSlotCallback copy_object_func, |
- ExpectedPageWatermarkState expected_page_watermark_state) { |
- |
PageIterator it(space, PageIterator::PAGES_IN_USE); |
- |
while (it.has_next()) { |
Page* page = it.next(); |
- uint32_t marks = page->GetRegionMarks(); |
- |
- if (marks != Page::kAllRegionsCleanMarks) { |
- Address start = page->ObjectAreaStart(); |
- |
- // Do not try to visit pointers beyond page allocation watermark. |
- // Page can contain garbage pointers there. |
- Address end; |
- |
- if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) || |
- page->IsWatermarkValid()) { |
- end = page->AllocationWatermark(); |
- } else { |
- end = page->CachedAllocationWatermark(); |
- } |
- |
- ASSERT(space == old_pointer_space_ || |
- (space == map_space_ && |
- ((page->ObjectAreaStart() - end) % Map::kSize == 0))); |
- |
- page->SetRegionMarks(IterateDirtyRegions(marks, |
- start, |
- end, |
- visit_dirty_region, |
- copy_object_func)); |
+ int count = IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(), |
+ page->RSetStart(), copy_object_func); |
+ if (paged_rset_histogram != NULL) { |
+ StatsTable::AddHistogramSample(paged_rset_histogram, count); |
} |
- |
- // Mark page watermark as invalid to maintain watermark validity invariant. |
- // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details. |
- page->InvalidateWatermark(true); |
} |
} |