Index: src/heap.cc
diff --git a/src/heap.cc b/src/heap.cc
index d554a3ba684c10517bc6cbb2cd5620ebceef564c..04737a4bcd0fb6ae264c1738cd127386b139811d 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -326,13 +326,6 @@ void Heap::GarbageCollectionPrologue() {
   }
   if (FLAG_gc_verbose) Print();
-
-  if (FLAG_print_rset) {
-    // Not all spaces have remembered set bits that we care about.
-    old_pointer_space_->PrintRSet();
-    map_space_->PrintRSet();
-    lo_space_->PrintRSet();
-  }
 #endif
 
 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
@@ -519,9 +512,8 @@ void Heap::ReserveSpace(
     Heap::CollectGarbage(cell_space_size, CELL_SPACE);
     gc_performed = true;
   }
-  // We add a slack-factor of 2 in order to have space for the remembered
-  // set and a series of large-object allocations that are only just larger
-  // than the page size.
+  // We add a slack-factor of 2 in order to have space for a series of
+  // large-object allocations that are only just larger than the page size.
   large_object_size *= 2;
   // The ReserveSpace method on the large object space checks how much
   // we can expand the old generation. This includes expansion caused by
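
To see why a factor of 2 suffices here: large-object allocations are
page-granular, so a request just over one page consumes almost two. A
minimal sketch of the arithmetic, with an assumed 8 KB page size (the real
constant lives in spaces.h, not in this patch):

    static const int kPageSize = 8 * 1024;  // assumed value, for illustration
    static int RoundUpToPages(int size) {
      return ((size + kPageSize - 1) / kPageSize) * kPageSize;
    }
    // RoundUpToPages(kPageSize + 4) == 2 * kPageSize: just under twice the
    // requested size, so doubling the reservation covers the worst case.
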
@@ -572,6 +564,25 @@ void Heap::ClearJSFunctionResultCaches() {
 }
 
+#ifdef DEBUG
+
+enum PageWatermarkValidity {
+  ALL_VALID,
+  ALL_INVALID
+};
+
+static void VerifyPageWatermarkValidity(PagedSpace* space,
+                                        PageWatermarkValidity validity) {
+  PageIterator it(space, PageIterator::PAGES_IN_USE);
+  bool expected_value = (validity == ALL_VALID);
+  while (it.has_next()) {
+    Page* page = it.next();
+    ASSERT(page->IsWatermarkValid() == expected_value);
+  }
+}
+#endif
+
+
 void Heap::PerformGarbageCollection(AllocationSpace space,
                                     GarbageCollector collector,
                                     GCTracer* tracer) {
@@ -816,6 +827,20 @@ void Heap::Scavenge() {
   gc_state_ = SCAVENGE;
 
+  Page::FlipMeaningOfInvalidatedWatermarkFlag();
+#ifdef DEBUG
+  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
+  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
+#endif
+
+  // We do not update the allocation watermark of the top page during linear
+  // allocation, to avoid overhead. So to maintain the watermark invariant
+  // we have to manually cache the watermark and mark the top page as having
+  // an invalid watermark. This guarantees that dirty-region iteration will
+  // use a correct watermark even if a linear allocation happens.
+  old_pointer_space_->FlushTopPageWatermark();
+  map_space_->FlushTopPageWatermark();
+
   // Implements Cheney's copying algorithm
   LOG(ResourceEvent("scavenge", "begin"));
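
For context, a plausible sketch of what FlushTopPageWatermark does (its
definition lives in spaces.h/spaces-inl.h, which this review does not show;
the accessor names below are assumptions based on how the watermark API is
used elsewhere in this patch):

    void PagedSpace::FlushTopPageWatermark() {
      Page* page = AllocationTopPage();           // assumed accessor
      page->SetCachedAllocationWatermark(top());  // pairs with CachedAllocationWatermark()
      page->InvalidateWatermark(true);            // same call as in IterateDirtyRegions below
    }
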
@@ -858,9 +883,17 @@ void Heap::Scavenge() {
   // Copy objects reachable from the old generation. By definition,
   // there are no intergenerational pointers in code or data spaces.
-  IterateRSet(old_pointer_space_, &ScavengePointer);
-  IterateRSet(map_space_, &ScavengePointer);
-  lo_space_->IterateRSet(&ScavengePointer);
+  IterateDirtyRegions(old_pointer_space_,
+                      &IteratePointersInDirtyRegion,
+                      &ScavengePointer,
+                      WATERMARK_CAN_BE_INVALID);
+
+  IterateDirtyRegions(map_space_,
+                      &IteratePointersInDirtyMapsRegion,
+                      &ScavengePointer,
+                      WATERMARK_CAN_BE_INVALID);
+
+  lo_space_->IterateDirtyRegions(&ScavengePointer);
 
   // Copy objects reachable from cells by scavenging cell values directly.
   HeapObjectIterator cell_iterator(cell_space_);
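
The callback types threaded through these calls can be inferred from their
use in this patch (the typedefs themselves live in heap.h, which is not
part of this review):

    typedef void (*ObjectSlotCallback)(HeapObject** pointer);
    typedef bool (*DirtyRegionCallback)(Address start,
                                        Address end,
                                        ObjectSlotCallback copy_object_func);

A region visitor returns true when the region still contains pointers into
new space, which tells the caller to keep that region's dirty bit set.
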
@@ -963,9 +996,8 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
       // Copy the from-space object to its new location (given by the
       // forwarding address) and fix its map.
       HeapObject* target = source->map_word().ToForwardingAddress();
-      CopyBlock(reinterpret_cast<Object**>(target->address()),
-                reinterpret_cast<Object**>(source->address()),
-                source->SizeFromMap(map));
+      int size = source->SizeFromMap(map);
+      CopyBlock(target->address(), source->address(), size);
       target->set_map(map);
 
 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
@@ -973,8 +1005,10 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
       RecordCopiedObject(target);
 #endif
       // Visit the newly copied object for pointers to new space.
-      target->Iterate(scavenge_visitor);
-      UpdateRSet(target);
+      ASSERT(!target->IsMap());
+      IterateAndMarkPointersToNewSpace(target->address(),
+                                       target->address() + size,
+                                       &ScavengePointer);
     }
 
     // Take another spin if there are now unswept objects in new space
@@ -985,117 +1019,6 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
 }
 
-void Heap::ClearRSetRange(Address start, int size_in_bytes) {
-  uint32_t start_bit;
-  Address start_word_address =
-      Page::ComputeRSetBitPosition(start, 0, &start_bit);
-  uint32_t end_bit;
-  Address end_word_address =
-      Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize,
-                                   0,
-                                   &end_bit);
-
-  // We want to clear the bits in the starting word starting with the
-  // first bit, and in the ending word up to and including the last
-  // bit. Build a pair of bitmasks to do that.
-  uint32_t start_bitmask = start_bit - 1;
-  uint32_t end_bitmask = ~((end_bit << 1) - 1);
-
-  // If the start address and end address are the same, we mask that
-  // word once, otherwise mask the starting and ending word
-  // separately and all the ones in between.
-  if (start_word_address == end_word_address) {
-    Memory::uint32_at(start_word_address) &= (start_bitmask | end_bitmask);
-  } else {
-    Memory::uint32_at(start_word_address) &= start_bitmask;
-    Memory::uint32_at(end_word_address) &= end_bitmask;
-    start_word_address += kIntSize;
-    memset(start_word_address, 0, end_word_address - start_word_address);
-  }
-}
-
-
-class UpdateRSetVisitor: public ObjectVisitor {
- public:
-
-  void VisitPointer(Object** p) {
-    UpdateRSet(p);
-  }
-
-  void VisitPointers(Object** start, Object** end) {
-    // Update a store into slots [start, end), used (a) to update remembered
-    // set when promoting a young object to old space or (b) to rebuild
-    // remembered sets after a mark-compact collection.
-    for (Object** p = start; p < end; p++) UpdateRSet(p);
-  }
- private:
-
-  void UpdateRSet(Object** p) {
-    // The remembered set should not be set. It should be clear for objects
-    // newly copied to old space, and it is cleared before rebuilding in the
-    // mark-compact collector.
-    ASSERT(!Page::IsRSetSet(reinterpret_cast<Address>(p), 0));
-    if (Heap::InNewSpace(*p)) {
-      Page::SetRSet(reinterpret_cast<Address>(p), 0);
-    }
-  }
-};
-
-
-int Heap::UpdateRSet(HeapObject* obj) {
-  ASSERT(!InNewSpace(obj));
-  // Special handling of fixed arrays to iterate the body based on the start
-  // address and offset. Just iterating the pointers as in UpdateRSetVisitor
-  // will not work because Page::SetRSet needs to have the start of the
-  // object for large object pages.
-  if (obj->IsFixedArray()) {
-    FixedArray* array = FixedArray::cast(obj);
-    int length = array->length();
-    for (int i = 0; i < length; i++) {
-      int offset = FixedArray::kHeaderSize + i * kPointerSize;
-      ASSERT(!Page::IsRSetSet(obj->address(), offset));
-      if (Heap::InNewSpace(array->get(i))) {
-        Page::SetRSet(obj->address(), offset);
-      }
-    }
-  } else if (!obj->IsCode()) {
-    // Skip code object, we know it does not contain inter-generational
-    // pointers.
-    UpdateRSetVisitor v;
-    obj->Iterate(&v);
-  }
-  return obj->Size();
-}
-
-
-void Heap::RebuildRSets() {
-  // By definition, we do not care about remembered set bits in code,
-  // data, or cell spaces.
-  map_space_->ClearRSet();
-  RebuildRSets(map_space_);
-
-  old_pointer_space_->ClearRSet();
-  RebuildRSets(old_pointer_space_);
-
-  Heap::lo_space_->ClearRSet();
-  RebuildRSets(lo_space_);
-}
-
-
-void Heap::RebuildRSets(PagedSpace* space) {
-  HeapObjectIterator it(space);
-  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
-    Heap::UpdateRSet(obj);
-}
-
-
-void Heap::RebuildRSets(LargeObjectSpace* space) {
-  LargeObjectIterator it(space);
-  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
-    Heap::UpdateRSet(obj);
-}
-
-
 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
 void Heap::RecordCopiedObject(HeapObject* obj) {
   bool should_record = false;
@@ -1121,9 +1044,7 @@ HeapObject* Heap::MigrateObject(HeapObject* source,
                                 HeapObject* target,
                                 int size) {
   // Copy the content of source to target.
-  CopyBlock(reinterpret_cast<Object**>(target->address()),
-            reinterpret_cast<Object**>(source->address()),
-            size);
+  CopyBlock(target->address(), source->address(), size);
 
   // Set the forwarding address.
   source->set_map_word(MapWord::FromForwardingAddress(target));
@@ -1682,7 +1603,7 @@ bool Heap::CreateInitialObjects() {
   // loop above because it needs to be allocated manually with the special
   // hash code in place. The hash code for the hidden_symbol is zero to ensure
   // that it will always be at the first entry in property descriptors.
-  obj = AllocateSymbol(CStrVector(""), 0, String::kHashComputedMask);
+  obj = AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
   if (obj->IsFailure()) return false;
   hidden_symbol_ = String::cast(obj);
@@ -1918,6 +1839,9 @@ Object* Heap::AllocateSharedFunctionInfo(Object* name) {
   share->set_compiler_hints(0);
   share->set_this_property_assignments_count(0);
   share->set_this_property_assignments(undefined_value());
+  share->set_num_literals(0);
+  share->set_end_position(0);
+  share->set_function_token_position(0);
   return result;
 }
@@ -2179,8 +2103,8 @@ Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
       : lo_space_->AllocateRaw(size);
   if (result->IsFailure()) return result;
 
-  reinterpret_cast<Array*>(result)->set_map(byte_array_map());
-  reinterpret_cast<Array*>(result)->set_length(length);
+  reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
+  reinterpret_cast<ByteArray*>(result)->set_length(length);
   return result;
 }
@@ -2195,8 +2119,8 @@ Object* Heap::AllocateByteArray(int length) {
   Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
   if (result->IsFailure()) return result;
 
-  reinterpret_cast<Array*>(result)->set_map(byte_array_map());
-  reinterpret_cast<Array*>(result)->set_length(length);
+  reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
+  reinterpret_cast<ByteArray*>(result)->set_length(length);
   return result;
 }
@@ -2312,9 +2236,7 @@ Object* Heap::CopyCode(Code* code) {
   // Copy code object.
   Address old_addr = code->address();
   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
-  CopyBlock(reinterpret_cast<Object**>(new_addr),
-            reinterpret_cast<Object**>(old_addr),
-            obj_size);
+  CopyBlock(new_addr, old_addr, obj_size);
   // Relocate the copy.
   Code* new_code = Code::cast(result);
   ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
@@ -2460,8 +2382,8 @@ Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
   // Copy the content. The arguments boilerplate doesn't have any
   // fields that point to new space so it's safe to skip the write
   // barrier here.
-  CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()),
-            reinterpret_cast<Object**>(boilerplate->address()),
+  CopyBlock(HeapObject::cast(result)->address(),
+            boilerplate->address(),
             kArgumentsObjectSize);
 
   // Set the two properties.
@@ -2683,8 +2605,8 @@ Object* Heap::CopyJSObject(JSObject* source) {
     clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
     if (clone->IsFailure()) return clone;
     Address clone_address = HeapObject::cast(clone)->address();
-    CopyBlock(reinterpret_cast<Object**>(clone_address),
-              reinterpret_cast<Object**>(source->address()),
+    CopyBlock(clone_address,
+              source->address(),
               object_size);
     // Update write barrier for all fields that lie beyond the header.
     RecordWrites(clone_address,
@@ -2696,8 +2618,8 @@ Object* Heap::CopyJSObject(JSObject* source) {
     ASSERT(Heap::InNewSpace(clone));
     // Since we know the clone is allocated in new space, we can copy
     // the contents without worrying about updating the write barrier.
-    CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()),
-              reinterpret_cast<Object**>(source->address()),
+    CopyBlock(HeapObject::cast(clone)->address(),
+              source->address(),
               object_size);
   }
@@ -2968,8 +2890,8 @@ Object* Heap::AllocateEmptyFixedArray() {
   Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
   if (result->IsFailure()) return result;
   // Initialize the object.
-  reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
-  reinterpret_cast<Array*>(result)->set_length(0);
+  reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
+  reinterpret_cast<FixedArray*>(result)->set_length(0);
   return result;
 }
@@ -2994,9 +2916,7 @@ Object* Heap::CopyFixedArray(FixedArray* src) {
   if (obj->IsFailure()) return obj;
   if (Heap::InNewSpace(obj)) {
     HeapObject* dst = HeapObject::cast(obj);
-    CopyBlock(reinterpret_cast<Object**>(dst->address()),
-              reinterpret_cast<Object**>(src->address()),
-              FixedArray::SizeFor(len));
+    CopyBlock(dst->address(), src->address(), FixedArray::SizeFor(len));
     return obj;
   }
   HeapObject::cast(obj)->set_map(src->map());
@@ -3017,8 +2937,8 @@ Object* Heap::AllocateFixedArray(int length) {
   Object* result = AllocateRawFixedArray(length);
   if (!result->IsFailure()) {
     // Initialize header.
-    reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
-    FixedArray* array = FixedArray::cast(result);
+    FixedArray* array = reinterpret_cast<FixedArray*>(result);
+    array->set_map(fixed_array_map());
     array->set_length(length);
     // Initialize body.
     ASSERT(!Heap::InNewSpace(undefined_value()));
@@ -3045,27 +2965,10 @@ Object* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
     space = LO_SPACE;
   }
 
-  // Specialize allocation for the space.
-  Object* result = Failure::OutOfMemoryException();
-  if (space == NEW_SPACE) {
-    // We cannot use Heap::AllocateRaw() because it will not properly
-    // allocate extra remembered set bits if always_allocate() is true and
-    // new space allocation fails.
-    result = new_space_.AllocateRaw(size);
-    if (result->IsFailure() && always_allocate()) {
-      if (size <= MaxObjectSizeInPagedSpace()) {
-        result = old_pointer_space_->AllocateRaw(size);
-      } else {
-        result = lo_space_->AllocateRawFixedArray(size);
-      }
-    }
-  } else if (space == OLD_POINTER_SPACE) {
-    result = old_pointer_space_->AllocateRaw(size);
-  } else {
-    ASSERT(space == LO_SPACE);
-    result = lo_space_->AllocateRawFixedArray(size);
-  }
-  return result;
+  AllocationSpace retry_space =
+      (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
+
+  return AllocateRaw(size, space, retry_space);
 }
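
The deleted branch duplicated retry logic that Heap::AllocateRaw (defined
in heap-inl.h, not shown in this review) already implements. An assumed
outline of that contract, inferred from the call sites in this patch:

    // Allocate in `space`; if that fails and always_allocate() is set,
    // retry in `retry_space`, which the caller picks to be large enough
    // for the object (OLD_POINTER_SPACE here, or LO_SPACE for objects
    // bigger than MaxObjectSizeInPagedSpace()).
    Object* Heap::AllocateRaw(int size_in_bytes,
                              AllocationSpace space,
                              AllocationSpace retry_space);
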
@@ -3113,7 +3016,7 @@ Object* Heap::AllocateUninitializedFixedArray(int length) {
 Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
   Object* result = Heap::AllocateFixedArray(length, pretenure);
   if (result->IsFailure()) return result;
-  reinterpret_cast<Array*>(result)->set_map(hash_table_map());
+  reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
   ASSERT(result->IsHashTable());
   return result;
 }
@@ -3365,6 +3268,49 @@ bool Heap::InSpace(Address addr, AllocationSpace space) {
 
 #ifdef DEBUG
+static void DummyScavengePointer(HeapObject** p) {
+}
+
+
+static void VerifyPointersUnderWatermark(
+    PagedSpace* space,
+    DirtyRegionCallback visit_dirty_region) {
+  PageIterator it(space, PageIterator::PAGES_IN_USE);
+
+  while (it.has_next()) {
+    Page* page = it.next();
+    Address start = page->ObjectAreaStart();
+    Address end = page->AllocationWatermark();
+
+    Heap::IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
+                              start,
+                              end,
+                              visit_dirty_region,
+                              &DummyScavengePointer);
+  }
+}
+
+
+static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
+  LargeObjectIterator it(space);
+  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
+    if (object->IsFixedArray()) {
+      Address slot_address = object->address();
+      Address end = object->address() + object->Size();
+
+      while (slot_address < end) {
+        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
+        // When we are not in a GC, the Heap::InNewSpace() predicate itself
+        // ASSERTs that any pointer it classifies as pointing into new space
+        // points into the active semispace; calling it here performs that
+        // check for every slot.
+        Heap::InNewSpace(*slot);
+        slot_address += kPointerSize;
+      }
+    }
+  }
+}
+
+
 void Heap::Verify() {
   ASSERT(HasBeenSetup());
@@ -3373,14 +3319,23 @@ void Heap::Verify() {
   new_space_.Verify();
 
-  VerifyPointersAndRSetVisitor rset_visitor;
-  old_pointer_space_->Verify(&rset_visitor);
-  map_space_->Verify(&rset_visitor);
+  VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
+  old_pointer_space_->Verify(&dirty_regions_visitor);
+  map_space_->Verify(&dirty_regions_visitor);
+
+  VerifyPointersUnderWatermark(old_pointer_space_,
+                               &IteratePointersInDirtyRegion);
+  VerifyPointersUnderWatermark(map_space_,
+                               &IteratePointersInDirtyMapsRegion);
+  VerifyPointersUnderWatermark(lo_space_);
+

    Vyacheslav Egorov (Chromium), 2010/05/21 11:12:26:
    Improved heap verification here. Now all regions a

+  VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
+  VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
 
-  VerifyPointersVisitor no_rset_visitor;
-  old_data_space_->Verify(&no_rset_visitor);
-  code_space_->Verify(&no_rset_visitor);
-  cell_space_->Verify(&no_rset_visitor);
+  VerifyPointersVisitor no_dirty_regions_visitor;
+  old_data_space_->Verify(&no_dirty_regions_visitor);
+  code_space_->Verify(&no_dirty_regions_visitor);
+  cell_space_->Verify(&no_dirty_regions_visitor);
 
   lo_space_->Verify();
 }
@@ -3433,65 +3388,253 @@ void Heap::ZapFromSpace() {
 #endif  // DEBUG
 
-int Heap::IterateRSetRange(Address object_start,
-                           Address object_end,
-                           Address rset_start,
-                           ObjectSlotCallback copy_object_func) {
-  Address object_address = object_start;
-  Address rset_address = rset_start;
-  int set_bits_count = 0;
-
-  // Loop over all the pointers in [object_start, object_end).
-  while (object_address < object_end) {
-    uint32_t rset_word = Memory::uint32_at(rset_address);
-    if (rset_word != 0) {
-      uint32_t result_rset = rset_word;
-      for (uint32_t bitmask = 1; bitmask != 0; bitmask = bitmask << 1) {
-        // Do not dereference pointers at or past object_end.
-        if ((rset_word & bitmask) != 0 && object_address < object_end) {
-          Object** object_p = reinterpret_cast<Object**>(object_address);
-          if (Heap::InNewSpace(*object_p)) {
-            copy_object_func(reinterpret_cast<HeapObject**>(object_p));
-          }
-          // If this pointer does not need to be remembered anymore, clear
-          // the remembered set bit.
-          if (!Heap::InNewSpace(*object_p)) result_rset &= ~bitmask;
-          set_bits_count++;
-        }
-        object_address += kPointerSize;
+bool Heap::IteratePointersInDirtyRegion(Address start,
+                                        Address end,
+                                        ObjectSlotCallback copy_object_func) {
+  Address slot_address = start;
+  bool pointers_to_new_space_found = false;
+
+  while (slot_address < end) {
+    Object** slot = reinterpret_cast<Object**>(slot_address);
+    if (Heap::InNewSpace(*slot)) {
+      ASSERT((*slot)->IsHeapObject());
+      copy_object_func(reinterpret_cast<HeapObject**>(slot));
+      if (Heap::InNewSpace(*slot)) {
+        ASSERT((*slot)->IsHeapObject());
+        pointers_to_new_space_found = true;
       }
-      // Update the remembered set if it has changed.
-      if (result_rset != rset_word) {
-        Memory::uint32_at(rset_address) = result_rset;
+    }
+    slot_address += kPointerSize;
+  }
+  return pointers_to_new_space_found;
+}
+
+
+// Compute the start address of the first map that starts at or after the
+// given addr.
+static inline Address MapStartAlign(Address addr) {
+  Address page = Page::FromAddress(addr)->ObjectAreaStart();
+  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
+}
+
+
+// Compute the end address of the last map that ends at or before the
+// given addr.
+static inline Address MapEndAlign(Address addr) {
+  Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
+  return page + ((addr - page) / Map::kSize * Map::kSize);
+}
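
A worked example of the alignment arithmetic, assuming a map size of 40
bytes (Map::kSize is defined in objects.h and is not part of this patch).
For a page whose object area starts at P:

    MapStartAlign(P + 100) == P + ((100 + 39) / 40) * 40 == P + 120
    MapEndAlign(P + 100)   == P + ( 100       / 40) * 40 == P + 80

That is, the first map boundary at or after the address and the last map
boundary at or before it, so [MapStartAlign(start), MapEndAlign(end)) is
the largest run of whole maps inside [start, end).
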
+
+
+static bool IteratePointersInDirtyMaps(Address start,
+                                       Address end,
+                                       ObjectSlotCallback copy_object_func) {
+  ASSERT(MapStartAlign(start) == start);
+  ASSERT(MapEndAlign(end) == end);
+
+  Address map_address = start;
+  bool pointers_to_new_space_found = false;
+
+  while (map_address < end) {
+    ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address)));
+    ASSERT(Memory::Object_at(map_address)->IsMap());
+
+    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
+    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
+
+    if (Heap::IteratePointersInDirtyRegion(pointer_fields_start,
+                                           pointer_fields_end,
+                                           copy_object_func)) {
+      pointers_to_new_space_found = true;
+    }
+
+    map_address += Map::kSize;
+  }
+
+  return pointers_to_new_space_found;
+}
+
+
+bool Heap::IteratePointersInDirtyMapsRegion(
+    Address start,
+    Address end,
+    ObjectSlotCallback copy_object_func) {
+  Address map_aligned_start = MapStartAlign(start);
+  Address map_aligned_end = MapEndAlign(end);
+
+  bool contains_pointers_to_new_space = false;
+
+  if (map_aligned_start != start) {
+    Address prev_map = map_aligned_start - Map::kSize;
+    ASSERT(Memory::Object_at(prev_map)->IsMap());
+
+    Address pointer_fields_start =
+        Max(start, prev_map + Map::kPointerFieldsBeginOffset);
+
+    Address pointer_fields_end =
+        Min(prev_map + Map::kCodeCacheOffset + kPointerSize, end);
+
+    contains_pointers_to_new_space =
+        IteratePointersInDirtyRegion(pointer_fields_start,
+                                     pointer_fields_end,
+                                     copy_object_func)
+        || contains_pointers_to_new_space;
+  }
+
+  contains_pointers_to_new_space =
+      IteratePointersInDirtyMaps(map_aligned_start,
+                                 map_aligned_end,
+                                 copy_object_func)
+      || contains_pointers_to_new_space;
+
+  if (map_aligned_end != end) {
+    ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
+
+    Address pointer_fields_start = map_aligned_end + Map::kPrototypeOffset;
+
+    Address pointer_fields_end =
+        Min(end, map_aligned_end + Map::kCodeCacheOffset + kPointerSize);
+
+    contains_pointers_to_new_space =
+        IteratePointersInDirtyRegion(pointer_fields_start,
+                                     pointer_fields_end,
+                                     copy_object_func)
+        || contains_pointers_to_new_space;
+  }
+
+  return contains_pointers_to_new_space;
+}
+
+
+void Heap::IterateAndMarkPointersToNewSpace(Address start,
+                                            Address end,
+                                            ObjectSlotCallback callback) {
+  Address slot_address = start;
+  Page* page = Page::FromAddress(start);
+
+  uint32_t marks = page->GetRegionMarks();
+
+  while (slot_address < end) {
+    Object** slot = reinterpret_cast<Object**>(slot_address);
+    if (Heap::InNewSpace(*slot)) {
+      ASSERT((*slot)->IsHeapObject());
+      callback(reinterpret_cast<HeapObject**>(slot));
+      if (Heap::InNewSpace(*slot)) {
+        ASSERT((*slot)->IsHeapObject());
+        marks |= page->GetRegionMaskForAddress(slot_address);
+      }
+    }
+    slot_address += kPointerSize;
+  }
+
+  page->SetRegionMarks(marks);
+}
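
A hypothetical sketch of the region-marks layout this relies on (the real
GetRegionMaskForAddress and GetRegionMarks are defined in spaces.h, which
this review does not include): each page tracks one dirty bit per
fixed-size region in a single 32-bit word, so with 8 KB pages each region
would cover 256 bytes.

    // Illustration only; the actual bit layout is an assumption.
    uint32_t Page::GetRegionMaskForAddress(Address addr) {
      int region_index = (addr - ObjectAreaStart()) / kRegionSize;
      return static_cast<uint32_t>(1) << region_index;
    }
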
+
+
+uint32_t Heap::IterateDirtyRegions(
+    uint32_t marks,
+    Address area_start,
+    Address area_end,
+    DirtyRegionCallback visit_dirty_region,
+    ObjectSlotCallback copy_object_func) {
+  uint32_t newmarks = 0;
+  uint32_t mask = 1;
+
+  if (area_start >= area_end) {
+    return newmarks;
+  }
+
+  Address region_start = area_start;
+
+  // area_start does not necessarily coincide with the start of the first
+  // region. Thus to calculate the beginning of the next region we have to
+  // align area_start by Page::kRegionSize.
+  Address second_region =
+      reinterpret_cast<Address>(
+          reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
+          ~Page::kRegionAlignmentMask);
+
+  // The next region might be beyond area_end.
+  Address region_end = Min(second_region, area_end);
+
+  if (marks & mask) {
+    if (visit_dirty_region(region_start, region_end, copy_object_func)) {
+      newmarks |= mask;
+    }
+  }
+  mask <<= 1;
+
+  // Iterate subsequent regions which lie fully inside [area_start, area_end).
+  region_start = region_end;
+  region_end = region_start + Page::kRegionSize;
+
+  while (region_end <= area_end) {
+    if (marks & mask) {
+      if (visit_dirty_region(region_start, region_end, copy_object_func)) {
+        newmarks |= mask;
+      }
+    }
+
+    region_start = region_end;
+    region_end = region_start + Page::kRegionSize;
+
+    mask <<= 1;
+  }
+
+  if (region_start != area_end) {
+    // A small piece of the area is left unvisited because area_end does not
+    // coincide with a region end. Check whether the region covering the
+    // last part of the area is dirty.
+    if (marks & mask) {
+      if (visit_dirty_region(region_start, area_end, copy_object_func)) {
+        newmarks |= mask;
       }
-    } else {
-      // No bits in the word were set. This is the common case.
-      object_address += kPointerSize * kBitsPerInt;
     }
-    rset_address += kIntSize;
   }
-  return set_bits_count;
+
+  return newmarks;
 }
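
A worked trace of the function above, assuming a 256-byte region size
(consistent with a 32-bit marks word on 8 KB pages; the real constants are
in spaces.h). For an area covering offsets [100, 700) of a region-aligned
object area at `base`, the visits are:

    visit_dirty_region(base + 100, base + 256, f)  // guarded by mark bit 0
    visit_dirty_region(base + 256, base + 512, f)  // guarded by mark bit 1
    visit_dirty_region(base + 512, base + 700, f)  // guarded by mark bit 2

A bit is re-set in newmarks only when its visitor reports a surviving
pointer into new space, so regions that no longer need scanning come back
clean.
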
 
 
-void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
-  ASSERT(Page::is_rset_in_use());
-  ASSERT(space == old_pointer_space_ || space == map_space_);
-
-  static void* paged_rset_histogram = StatsTable::CreateHistogram(
-      "V8.RSetPaged",
-      0,
-      Page::kObjectAreaSize / kPointerSize,
-      30);
+void Heap::IterateDirtyRegions(
+    PagedSpace* space,
+    DirtyRegionCallback visit_dirty_region,
+    ObjectSlotCallback copy_object_func,
+    ExpectedPageWatermarkState expected_page_watermark_state) {
   PageIterator it(space, PageIterator::PAGES_IN_USE);
+
   while (it.has_next()) {
     Page* page = it.next();
-    int count = IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(),
-                                 page->RSetStart(), copy_object_func);
-    if (paged_rset_histogram != NULL) {
-      StatsTable::AddHistogramSample(paged_rset_histogram, count);
+    uint32_t marks = page->GetRegionMarks();
+
+    if (marks != Page::kAllRegionsCleanMarks) {
+      Address start = page->ObjectAreaStart();
+
+      // Do not try to visit pointers beyond the page allocation watermark:
+      // the page can contain garbage pointers there.
+      Address end;
+
+      if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
+          page->IsWatermarkValid()) {
+        end = page->AllocationWatermark();
+      } else {
+        end = page->CachedAllocationWatermark();
+      }
+
+      ASSERT(space == old_pointer_space_ ||
+             (space == map_space_ &&
+              ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
+
+      page->SetRegionMarks(IterateDirtyRegions(marks,
+                                               start,
+                                               end,
+                                               visit_dirty_region,
+                                               copy_object_func));
     }
+
+    // Mark the page watermark as invalid to maintain the watermark validity
+    // invariant. See Page::FlipMeaningOfInvalidatedWatermarkFlag() for
+    // details.
+    page->InvalidateWatermark(true);
   }
 }