Index: src/heap/heap.cc
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 8ce59d53a339ac2a3f990d0ca8321fd7de8a1d62..b16778f86f596662d35087d7e390cd88c1957254 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -3066,6 +3066,16 @@ void Heap::CreateFillerObjectAt(Address addr, int size,
   if (mode == ClearRecordedSlots::kYes) {
     ClearRecordedSlotRange(addr, addr + size);
   }
+
+  // If the location where the filler is created is within a black area we have
+  // to clear the mark bits of the filler space.
+  if (incremental_marking()->black_allocation() &&
+      Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(addr))) {
+    Page* page = Page::FromAddress(addr);
+    page->markbits()->ClearRange(page->AddressToMarkbitIndex(addr),
+                                 page->AddressToMarkbitIndex(addr + size));
+  }
+
   // At this point, we may be deserializing the heap from a snapshot, and
   // none of the maps have been created yet and are NULL.
   DCHECK((filler->map() == NULL && !deserialization_complete_) ||
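Note on the hunk above: under black allocation the whole linear allocation area is pre-marked black, so a filler carved out of such an area must have its mark bits cleared or the dead filler would be counted as live. Below is a minimal standalone sketch of that bit-clearing, assuming a one-bit-per-word toy bitmap; MarkBitmap, AddressToMarkbitIndex, and the constants are hypothetical stand-ins, not V8's real Page/Bitmap API.

// Minimal sketch of clearing a filler's mark bits inside a black area.
#include <bitset>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iostream>

constexpr uintptr_t kPointerSize = 8;
constexpr size_t kBitsPerPage = 1 << 15;  // toy page: one mark bit per word

struct MarkBitmap {
  std::bitset<kBitsPerPage> bits;
  // Clear the half-open range [start, end) of mark-bit indices.
  void ClearRange(size_t start, size_t end) {
    for (size_t i = start; i < end; i++) bits.reset(i);
  }
};

// Hypothetical mapping from a page offset to its mark-bit index.
size_t AddressToMarkbitIndex(uintptr_t offset) { return offset / kPointerSize; }

int main() {
  MarkBitmap markbits;
  markbits.bits.set();  // black allocation: the whole area starts out marked

  // Carve a 32-byte filler out of the black area at page offset 256 and
  // clear its bits, as the patch does for fillers in black areas.
  uintptr_t filler_offset = 256, filler_size = 32;
  markbits.ClearRange(AddressToMarkbitIndex(filler_offset),
                      AddressToMarkbitIndex(filler_offset + filler_size));

  // The filler now reads as white (dead) instead of live.
  assert(!markbits.bits.test(AddressToMarkbitIndex(filler_offset)));
  std::cout << "filler mark bits cleared\n";
  return 0;
}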
@@ -3136,13 +3146,20 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
   DCHECK(elements_to_trim <= len);
 
   // Calculate location of new array start.
-  Address new_start = object->address() + bytes_to_trim;
+  Address old_start = object->address();
+  Address new_start = old_start + bytes_to_trim;
+
+  // Transfer the mark bits to their new location if the object is not within
+  // a black area.
+  if (!incremental_marking()->black_allocation() ||
+      !Marking::IsBlack(ObjectMarking::MarkBitFrom(new_start))) {
+    IncrementalMarking::TransferMark(this, old_start, new_start);
+  }
 
   // Technically in new space this write might be omitted (except for
   // debug mode which iterates through the heap), but to play safer
   // we still do it.
-  CreateFillerObjectAt(object->address(), bytes_to_trim,
-                       ClearRecordedSlots::kYes);
+  CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
   // Initialize header of the trimmed array. Since left trimming is only
   // performed on pages which are not concurrently swept creating a filler
   // object does not require synchronization.
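The hunk above transfers the array's mark bit to its new start address unless black allocation already guarantees the new location is black. A toy sketch of that decision, with a color map standing in for real mark bits; Color, TransferMark, and color_of are illustrative, not V8 API.

// Toy sketch of the conditional mark transfer on left-trimming.
#include <cstdint>
#include <iostream>
#include <unordered_map>

enum class Color { kWhite, kGrey, kBlack };

std::unordered_map<uintptr_t, Color> color_of;  // address -> mark color
bool black_allocation = true;

// Move the object's color from its old start to its new start, whitening
// the old location (what a mark transfer amounts to in this toy model).
void TransferMark(uintptr_t old_start, uintptr_t new_start) {
  color_of[new_start] = color_of[old_start];
  color_of[old_start] = Color::kWhite;
}

int main() {
  uintptr_t old_start = 0x1000, new_start = 0x1018;
  color_of[old_start] = Color::kGrey;   // the array was marked grey
  color_of[new_start] = Color::kWhite;  // interior word, not a mark yet

  // Mirror the patch's condition: skip the transfer only when black
  // allocation has already made the new start black.
  if (!black_allocation || color_of[new_start] != Color::kBlack) {
    TransferMark(old_start, new_start);
  }
  std::cout << "new start is grey: "
            << (color_of[new_start] == Color::kGrey) << "\n";
  return 0;
}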
@@ -3151,18 +3168,18 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
   int new_start_index = elements_to_trim * (element_size / kPointerSize);
   former_start[new_start_index] = map;
   former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
+
   FixedArrayBase* new_object =
       FixedArrayBase::cast(HeapObject::FromAddress(new_start));
 
+  // Maintain consistency of live bytes during incremental marking
+  AdjustLiveBytes(new_object, -bytes_to_trim, Heap::CONCURRENT_TO_SWEEPER);
+
   // Remove recorded slots for the new map and length offset.
   ClearRecordedSlot(new_object, HeapObject::RawField(new_object, 0));
   ClearRecordedSlot(new_object, HeapObject::RawField(
                                     new_object, FixedArrayBase::kLengthOffset));
 
-  // Maintain consistency of live bytes during incremental marking
-  IncrementalMarking::TransferMark(this, object->address(), new_start);
-  AdjustLiveBytes(new_object, -bytes_to_trim, Heap::CONCURRENT_TO_SWEEPER);
-
   // Notify the heap profiler of change in object layout.
   OnMoveEvent(new_object, object, new_object->Size());
   return new_object;
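This hunk moves the live-byte adjustment ahead of the recorded-slot clearing and drops the unconditional TransferMark, which now happens conditionally earlier. A small sketch of the bookkeeping invariant itself; Page and AdjustLiveBytes here are simplified stand-ins for the real counters.

// Small sketch of the live-byte invariant when a prefix is trimmed off.
#include <cassert>
#include <iostream>

struct Page {
  int live_bytes = 0;
};

void AdjustLiveBytes(Page* page, int by) { page->live_bytes += by; }

int main() {
  Page page;
  const int object_size = 64;
  AdjustLiveBytes(&page, object_size);  // the array counted as 64 live bytes

  // Left-trimming turns the first 24 bytes into a dead filler, so the live
  // count must shrink by exactly the trimmed size or the sweeper's
  // accounting drifts.
  const int bytes_to_trim = 24;
  AdjustLiveBytes(&page, -bytes_to_trim);

  assert(page.live_bytes == object_size - bytes_to_trim);
  std::cout << "live bytes: " << page.live_bytes << "\n";
  return 0;
}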
@@ -4177,14 +4194,13 @@ void Heap::RegisterReservationsForBlackAllocation(Reservation* reservations) {
   // Hence we have to color all objects of the reservation first black to avoid
   // unnecessary marking deque load.
   if (incremental_marking()->black_allocation()) {
-    for (int i = CODE_SPACE; i < Serializer::kNumberOfSpaces; i++) {
+    for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
       const Heap::Reservation& res = reservations[i];
       for (auto& chunk : res) {
         Address addr = chunk.start;
         while (addr < chunk.end) {
           HeapObject* obj = HeapObject::FromAddress(addr);
           Marking::MarkBlack(ObjectMarking::MarkBitFrom(obj));
-          MemoryChunk::IncrementLiveBytesFromGC(obj, obj->Size());
           addr += obj->Size();
         }
       }
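The final hunk starts the space loop at OLD_SPACE instead of CODE_SPACE, so reservations in old space (and the spaces after it) are pre-colored black as well, and it no longer bumps live bytes per object. A toy sketch of the chunk walk itself; the size-prefixed object layout and helpers below are hypothetical, not the deserializer's real reservation format.

// Toy sketch of walking back-to-back objects in a reserved chunk.
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

struct Chunk {
  uintptr_t start, end;  // half-open byte range of back-to-back objects
};

// Hypothetical layout: the first 8 bytes of each object hold its total size.
uint64_t ObjectSize(const std::vector<uint8_t>& heap, uintptr_t addr) {
  uint64_t size;
  std::memcpy(&size, &heap[addr], sizeof(size));
  return size;
}

void WriteSize(std::vector<uint8_t>& heap, uintptr_t addr, uint64_t size) {
  std::memcpy(&heap[addr], &size, sizeof(size));
}

int main() {
  // Two back-to-back "objects" of 16 and 32 bytes inside one reserved chunk.
  std::vector<uint8_t> heap(48, 0);
  WriteSize(heap, 0, 16);
  WriteSize(heap, 16, 32);
  Chunk chunk{0, 48};

  // Step from object to object by size, as the patched loop does; each visit
  // stands in for Marking::MarkBlack(...). Note that, matching the removal of
  // MemoryChunk::IncrementLiveBytesFromGC above, live bytes are not
  // incremented during this walk.
  int marked_black = 0;
  for (uintptr_t addr = chunk.start; addr < chunk.end;
       addr += ObjectSize(heap, addr)) {
    marked_black++;
  }
  std::cout << "objects marked black: " << marked_black << "\n";
  return 0;
}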