| Index: src/mark-compact.cc |
| diff --git a/src/mark-compact.cc b/src/mark-compact.cc |
| index 51b1008b6babb1f5a39718eb1c76555b2817f1b2..20923dc598b33b0a8e48a8a73608d15ea745ba3e 100644 |
| --- a/src/mark-compact.cc |
| +++ b/src/mark-compact.cc |
| @@ -1989,6 +1989,80 @@ static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, |
| } |
| +int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage( |
| + NewSpace* new_space, |
| + NewSpacePage* p) { |
|
titzer
2013/07/11 10:05:33
Please add a TODO here and in the other places whi…
Hannes Payer (out of office)
2013/07/11 11:14:17
I am not adding the todo, I am building the iterat…
|
| + ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); |
| + ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); |
| + ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); |
| + ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); |
| + |
| + MarkBit::CellType* cells = p->markbits()->cells(); |
| + int survivors_size = 0; |
| + |
| + int last_cell_index = |
| + Bitmap::IndexToCell( |
| + Bitmap::CellAlignIndex( |
| + p->AddressToMarkbitIndex(p->area_end()))); |
| + |
| + Address cell_base = p->area_start(); |
| + int cell_index = Bitmap::IndexToCell( |
| + Bitmap::CellAlignIndex( |
| + p->AddressToMarkbitIndex(cell_base))); |
| + |
| + for (; |
| + cell_index < last_cell_index; |
| + cell_index++, cell_base += 32 * kPointerSize) { |
| + ASSERT(static_cast<unsigned>(cell_index) == |
| + Bitmap::IndexToCell( |
| + Bitmap::CellAlignIndex( |
| + p->AddressToMarkbitIndex(cell_base)))); |
| + |
| + const MarkBit::CellType current_cell = cells[cell_index]; |
| + if (current_cell == 0) continue; |
| + |
| + for (unsigned int i = CompilerIntrinsics::CountTrailingZeros(current_cell); |
|
Hannes Payer (out of office)
2013/07/11 11:14:17
I changed that part, could you have another look?
|
| + i < Bitmap::kBitsPerCell; |
| + i++) { |
| + MarkBit markbit(&cells[cell_index], 1 << i, false); |
| + if (markbit.Get()) { |
| + Address address = cell_base + i * kPointerSize; |
| + HeapObject* object = HeapObject::FromAddress(address); |
| + |
| + int size = object->Size(); |
| + survivors_size += size; |
| + |
| + // Aggressively promote young survivors to the old space. |
| + if (TryPromoteObject(object, size)) { |
| + continue; |
| + } |
| + |
| + // Promotion failed. Just migrate object to another semispace. |
| + MaybeObject* allocation = new_space->AllocateRaw(size); |
| + if (allocation->IsFailure()) { |
| + if (!new_space->AddFreshPage()) { |
| + // Shouldn't happen. We are sweeping linearly, and to-space |
| + // has the same number of pages as from-space, so there is |
| + // always room. |
| + UNREACHABLE(); |
| + } |
| + allocation = new_space->AllocateRaw(size); |
| + ASSERT(!allocation->IsFailure()); |
| + } |
| + Object* target = allocation->ToObjectUnchecked(); |
| + |
| + MigrateObject(HeapObject::cast(target)->address(), |
| + object->address(), |
| + size, |
| + NEW_SPACE); |
| + } |
| + } |
| + cells[cell_index] = 0; |
| + } |
| + return survivors_size; |
| +} |
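
Editor's note: as context for the loop above (and for the CountTrailingZeros exchange in the thread), each 32-bit cell of the page's mark bitmap covers 32 pointer-sized slots, and the count-trailing-zeros intrinsic jumps straight to the first set bit so an empty prefix is never scanned bit by bit. Below is a minimal standalone sketch of that scanning pattern; the constants, the use of __builtin_ctz (standing in for CompilerIntrinsics::CountTrailingZeros), and the VisitLiveObject callback are illustrative assumptions, not V8's actual API.

    #include <cstdint>
    #include <cstdio>

    // Assumed for illustration: one 32-bit cell of mark bits covers
    // 32 pointer-sized slots (8 bytes each on a 64-bit target).
    static const unsigned kBitsPerCell = 32;
    static const unsigned kPointerSize = 8;

    // Hypothetical callback standing in for the promote-or-migrate
    // logic applied to each live object in the patch.
    static void VisitLiveObject(uintptr_t address) {
      std::printf("live object at 0x%lx\n",
                  static_cast<unsigned long>(address));
    }

    // Scan the mark bits of one cell: start at the first set bit
    // (__builtin_ctz) and visit every set bit after it, computing each
    // object's address from its bit index, as the patch's inner loop does.
    static void ScanCell(uint32_t cell, uintptr_t cell_base) {
      if (cell == 0) return;  // No live objects in this cell.
      for (unsigned i = __builtin_ctz(cell); i < kBitsPerCell; i++) {
        if (cell & (1u << i)) {
          VisitLiveObject(cell_base + i * kPointerSize);
        }
      }
    }

    int main() {
      // Bits 3 and 7 set: two live objects, 3 and 7 slots past the base.
      ScanCell((1u << 3) | (1u << 7), 0x1000);
      return 0;
    }
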
| + |
| + |
| static void DiscoverGreyObjectsInSpace(Heap* heap, |
| MarkingDeque* marking_deque, |
| PagedSpace* space) { |
| @@ -2895,42 +2969,10 @@ void MarkCompactCollector::EvacuateNewSpace() { |
| // migrate live objects and write forwarding addresses. This stage puts |
| // new entries in the store buffer and may cause some pages to be marked |
| // scan-on-scavenge. |
| - SemiSpaceIterator from_it(from_bottom, from_top); |
| - for (HeapObject* object = from_it.Next(); |
| - object != NULL; |
| - object = from_it.Next()) { |
| - MarkBit mark_bit = Marking::MarkBitFrom(object); |
| - if (mark_bit.Get()) { |
| - mark_bit.Clear(); |
| - // Don't bother decrementing live bytes count. We'll discard the |
| - // entire page at the end. |
| - int size = object->Size(); |
| - survivors_size += size; |
| - |
| - // Aggressively promote young survivors to the old space. |
| - if (TryPromoteObject(object, size)) { |
| - continue; |
| - } |
| - |
| - // Promotion failed. Just migrate object to another semispace. |
| - MaybeObject* allocation = new_space->AllocateRaw(size); |
| - if (allocation->IsFailure()) { |
| - if (!new_space->AddFreshPage()) { |
| - // Shouldn't happen. We are sweeping linearly, and to-space |
| - // has the same number of pages as from-space, so there is |
| - // always room. |
| - UNREACHABLE(); |
| - } |
| - allocation = new_space->AllocateRaw(size); |
| - ASSERT(!allocation->IsFailure()); |
| - } |
| - Object* target = allocation->ToObjectUnchecked(); |
| - |
| - MigrateObject(HeapObject::cast(target)->address(), |
| - object->address(), |
| - size, |
| - NEW_SPACE); |
| - } |
| + NewSpacePageIterator it(from_bottom, from_top); |
| + while (it.has_next()) { |
| + NewSpacePage* p = it.next(); |
| + survivors_size += DiscoverAndPromoteBlackObjectsOnPage(new_space, p); |
| } |
| heap_->IncrementYoungSurvivorsCounter(survivors_size); |
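
Editor's note: the rewritten loop above walks from-space a page at a time with NewSpacePageIterator and delegates the per-object work to the new helper, which can then clear a whole cell of mark bits at once (cells[cell_index] = 0) rather than clearing bits object by object as the deleted SemiSpaceIterator loop did. The promote-or-migrate fallback itself is unchanged between the two versions. Here is a hedged sketch of that control flow; every type and function in it is a stub standing in for V8's interfaces, not the real thing.

    #include <cassert>
    #include <cstddef>
    #include <cstdlib>

    struct Object { size_t size; };

    // Stubs for illustration only.
    static bool TryPromoteObject(Object*) { return false; }  // Promotion failed.
    static bool AddFreshPage() { return true; }              // Always succeeds here.
    static Object* AllocateRawInToSpace(size_t size) {       // Toy allocator.
      return static_cast<Object*>(std::malloc(sizeof(Object) + size));
    }
    static void Migrate(Object*, Object*, size_t) {}         // Copy elided.

    static void EvacuateObject(Object* object) {
      // First choice: promote the survivor straight to the old space.
      if (TryPromoteObject(object)) return;

      // Fallback: copy it into the other semispace. On the first failure,
      // add a fresh page and retry; a second failure cannot happen because
      // the sweep is linear and to-space has as many pages as from-space
      // (hence UNREACHABLE() in the real code).
      Object* target = AllocateRawInToSpace(object->size);
      if (target == NULL) {
        bool added = AddFreshPage();
        assert(added);
        target = AllocateRawInToSpace(object->size);
        assert(target != NULL);
      }
      Migrate(target, object, object->size);
    }

    int main() {
      Object obj = { 16 };
      EvacuateObject(&obj);
      return 0;
    }
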