Chromium Code Reviews

Diff: src/heap/mark-compact.cc

Issue 1499893002: Reland "[heap] Refactor evacuation for young and old gen into visitors." (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Added AlwaysAllocateScope (created 5 years ago)
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/mark-compact.h"

#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/sys-info.h"
#include "src/code-stubs.h"
(...skipping 218 matching lines...)
  VerifyEvacuation(heap, heap->map_space());
  VerifyEvacuation(heap->new_space());

  VerifyEvacuationVisitor visitor;
  heap->IterateStrongRoots(&visitor, VISIT_ALL);
}
#endif  // VERIFY_HEAP


void MarkCompactCollector::SetUp() {
+  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
  free_list_old_space_.Reset(new FreeList(heap_->old_space()));
  free_list_code_space_.Reset(new FreeList(heap_->code_space()));
  free_list_map_space_.Reset(new FreeList(heap_->map_space()));
  EnsureMarkingDequeIsReserved();
  EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
  slots_buffer_allocator_ = new SlotsBufferAllocator();

  if (FLAG_flush_code) {
    code_flusher_ = new CodeFlusher(isolate());
    if (FLAG_trace_code_flushing) {
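The four DCHECKs added at the top of SetUp() previously lived inside DiscoverAndEvacuateBlackObjectsOnPage() (removed further down in this CL); hoisting them means the two-bit mark encoding is asserted once per collector setup instead of once per page walk. Every object start owns a pair of mark bits, and only three of the four combinations are legal. A minimal illustration in plain C++ (the decoder is hypothetical, not V8 code; the color meanings follow the usual tri-color scheme):

#include <cassert>
#include <string>

// The four two-bit patterns asserted in SetUp() above.
enum class Color { kWhite, kBlack, kGrey, kImpossible };

Color ColorFromPattern(const std::string& bits) {
  if (bits == "00") return Color::kWhite;  // unmarked, potentially garbage
  if (bits == "10") return Color::kBlack;  // marked and fully scanned
  if (bits == "11") return Color::kGrey;   // marked, still on the marking deque
  assert(bits == "01");                    // the one combination that must
  return Color::kImpossible;               // never occur
}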
(...skipping 1265 matching lines...)
      if (marking_deque()->IsFull()) return;
      offset += 2;
      grey_objects >>= 2;
    }

    grey_objects >>= (Bitmap::kBitsPerCell - 1);
  }
}
-int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
-    NewSpace* new_space, NewSpacePage* p) {
-  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
-  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
-  MarkBit::CellType* cells = p->markbits()->cells();
-  int survivors_size = 0;
-
-  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
-    Address cell_base = it.CurrentCellBase();
-    MarkBit::CellType* cell = it.CurrentCell();
-
-    MarkBit::CellType current_cell = *cell;
-    if (current_cell == 0) continue;
-
-    int offset = 0;
-    while (current_cell != 0) {
-      int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
-      current_cell >>= trailing_zeros;
-      offset += trailing_zeros;
-      Address address = cell_base + offset * kPointerSize;
-      HeapObject* object = HeapObject::FromAddress(address);
-      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
-      int size = object->Size();
-      survivors_size += size;
-
-      Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
-
-      offset += 2;
-      current_cell >>= 2;
-
-      // TODO(hpayer): Refactor EvacuateObject and call this function instead.
-      if (heap()->ShouldBePromoted(object->address(), size) &&
-          TryPromoteObject(object, size)) {
-        continue;
-      }
-
-      AllocationAlignment alignment = object->RequiredAlignment();
-      AllocationResult allocation = new_space->AllocateRaw(size, alignment);
-      if (allocation.IsRetry()) {
-        if (!new_space->AddFreshPage()) {
-          // Shouldn't happen. We are sweeping linearly, and to-space
-          // has the same number of pages as from-space, so there is
-          // always room unless we are in an OOM situation.
-          FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
-        }
-        allocation = new_space->AllocateRaw(size, alignment);
-        DCHECK(!allocation.IsRetry());
-      }
-      Object* target = allocation.ToObjectChecked();
-
-      MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
-      if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
-        heap()->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
-      }
-      heap()->IncrementSemiSpaceCopiedObjectSize(size);
-    }
-    *cells = 0;
-  }
-  return survivors_size;
-}
+class MarkCompactCollector::HeapObjectVisitor {
+ public:
+  virtual ~HeapObjectVisitor() {}
+  virtual bool Visit(HeapObject* object) = 0;
+};
+
+
+class MarkCompactCollector::EvacuateNewSpaceVisitor
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  explicit EvacuateNewSpaceVisitor(Heap* heap) : heap_(heap) {}
+
+  virtual bool Visit(HeapObject* object) {
+    Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
+    int size = object->Size();
+
+    // TODO(hpayer): Refactor EvacuateObject and call this function instead.
+    if (heap_->ShouldBePromoted(object->address(), size) &&
+        heap_->mark_compact_collector()->TryPromoteObject(object, size)) {
+      return true;
+    }
+
+    AllocationAlignment alignment = object->RequiredAlignment();
+    AllocationResult allocation =
+        heap_->new_space()->AllocateRaw(size, alignment);
+    if (allocation.IsRetry()) {
+      if (!heap_->new_space()->AddFreshPage()) {
+        // Shouldn't happen. We are sweeping linearly, and to-space
+        // has the same number of pages as from-space, so there is
+        // always room unless we are in an OOM situation.
+        FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
+      }
+      allocation = heap_->new_space()->AllocateRaw(size, alignment);
+      DCHECK(!allocation.IsRetry());
+    }
+    Object* target = allocation.ToObjectChecked();
+
+    heap_->mark_compact_collector()->MigrateObject(
+        HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
+    if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
+      heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
+    }
+    heap_->IncrementSemiSpaceCopiedObjectSize(size);
+    return true;
+  }
+
+ private:
+  Heap* heap_;
+};
+
+
+class MarkCompactCollector::EvacuateOldSpaceVisitor
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  EvacuateOldSpaceVisitor(Heap* heap,
+                          CompactionSpaceCollection* compaction_spaces,
+                          SlotsBuffer** evacuation_slots_buffer)
+      : heap_(heap),
+        compaction_spaces_(compaction_spaces),
+        evacuation_slots_buffer_(evacuation_slots_buffer) {}
+
+  virtual bool Visit(HeapObject* object) {
+    int size = object->Size();
+    AllocationAlignment alignment = object->RequiredAlignment();
+    HeapObject* target_object = nullptr;
+    AllocationSpace id =
+        Page::FromAddress(object->address())->owner()->identity();
+    AllocationResult allocation =
+        compaction_spaces_->Get(id)->AllocateRaw(size, alignment);
+    if (!allocation.To(&target_object)) {
+      return false;
+    }
+    heap_->mark_compact_collector()->MigrateObject(
+        target_object, object, size, id, evacuation_slots_buffer_);
+    DCHECK(object->map_word().IsForwardingAddress());
+    return true;
+  }
+
+ private:
+  Heap* heap_;
+  CompactionSpaceCollection* compaction_spaces_;
+  SlotsBuffer** evacuation_slots_buffer_;
+};
+
+
+bool MarkCompactCollector::IterateLiveObjectsOnPage(MemoryChunk* page,
+                                                    HeapObjectVisitor* visitor,
+                                                    IterationMode mode) {
+  Address offsets[16];
+  for (MarkBitCellIterator it(page); !it.Done(); it.Advance()) {
+    Address cell_base = it.CurrentCellBase();
+    MarkBit::CellType* cell = it.CurrentCell();
+
+    if (*cell == 0) continue;
+
+    int live_objects = MarkWordToObjectStarts(*cell, cell_base, offsets);
+    for (int i = 0; i < live_objects; i++) {
+      HeapObject* object = HeapObject::FromAddress(offsets[i]);
+      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+      if (!visitor->Visit(object)) {
+        if ((mode == kClearMarkbits) && (i > 0)) {
+          page->markbits()->ClearRange(
+              page->AddressToMarkbitIndex(page->area_start()),
+              page->AddressToMarkbitIndex(offsets[i]));
+        }
+        return false;
+      }
+    }
+    if (mode == kClearMarkbits) {
+      *cell = 0;
+    }
+  }
+  return true;
+}
void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    DiscoverGreyObjectsOnPage(p);
    if (marking_deque()->IsFull()) return;
  }
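The hunk above is the core of the refactoring: the single-purpose DiscoverAndEvacuateBlackObjectsOnPage() is split into a generic mark-bitmap walk, IterateLiveObjectsOnPage(), and pluggable HeapObjectVisitor policies for young- and old-generation evacuation. The walk decodes each nonzero mark-bit cell into object start addresses via MarkWordToObjectStarts() and calls Visit() on every black object; a false return aborts the page, and in kClearMarkbits mode the iterator then resets the mark bits of the objects already visited so an aborted page stays consistent. Any other per-object pass can be phrased the same way. The byte-counting visitor below is a hypothetical sketch of that contract, not part of this CL:

// Hypothetical visitor: tallies live bytes on a page without moving anything.
class LiveByteCountingVisitor : public MarkCompactCollector::HeapObjectVisitor {
 public:
  // Called once per black object; returning false would abort the walk.
  virtual bool Visit(HeapObject* object) {
    live_bytes_ += object->Size();
    return true;
  }
  intptr_t live_bytes() const { return live_bytes_; }

 private:
  intptr_t live_bytes_ = 0;
};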
(...skipping 1529 matching lines...)
  new_space->Flip();
  new_space->ResetAllocationInfo();

  int survivors_size = 0;

  // First pass: traverse all objects in inactive semispace, remove marks,
  // migrate live objects and write forwarding addresses. This stage puts
  // new entries in the store buffer and may cause some pages to be marked
  // scan-on-scavenge.
  NewSpacePageIterator it(from_bottom, from_top);
+  EvacuateNewSpaceVisitor new_space_visitor(heap());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
-    survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
+    survivors_size += p->LiveBytes();
+    bool ok = IterateLiveObjectsOnPage(p, &new_space_visitor, kClearMarkbits);
+    USE(ok);
+    DCHECK(ok);
  }

  heap_->IncrementYoungSurvivorsCounter(survivors_size);
  new_space->set_age_mark(new_space->top());
}


void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
    SlotsBuffer* evacuation_slots_buffer) {
  base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_);
  evacuation_slots_buffers_.Add(evacuation_slots_buffer);
}
-bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
-    Page* p, PagedSpace* target_space, SlotsBuffer** evacuation_slots_buffer) {
-  AlwaysAllocateScope always_allocate(isolate());
-  DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
-
-  Address starts[16];
-  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
-    Address cell_base = it.CurrentCellBase();
-    MarkBit::CellType* cell = it.CurrentCell();
-
-    if (*cell == 0) continue;
-
-    int live_objects = MarkWordToObjectStarts(*cell, cell_base, starts);
-    for (int i = 0; i < live_objects; i++) {
-      HeapObject* object = HeapObject::FromAddress(starts[i]);
-      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
-      int size = object->Size();
-      AllocationAlignment alignment = object->RequiredAlignment();
-      HeapObject* target_object = nullptr;
-      AllocationResult allocation = target_space->AllocateRaw(size, alignment);
-      if (!allocation.To(&target_object)) {
-        // We need to abort compaction for this page. Make sure that we reset
-        // the mark bits for objects that have already been migrated.
-        if (i > 0) {
-          p->markbits()->ClearRange(p->AddressToMarkbitIndex(p->area_start()),
-                                    p->AddressToMarkbitIndex(starts[i]));
-        }
-        return false;
-      }
-
-      MigrateObject(target_object, object, size, target_space->identity(),
-                    evacuation_slots_buffer);
-      DCHECK(object->map_word().IsForwardingAddress());
-    }
-
-    // Clear marking bits for current cell.
-    *cell = 0;
-  }
-  p->ResetLiveBytes();
-  return true;
-}
-
-
int MarkCompactCollector::NumberOfParallelCompactionTasks() {
  if (!FLAG_parallel_compaction) return 1;
  // Compute the number of needed tasks based on a target compaction time, the
  // profiled compaction speed and marked live memory.
  //
  // The number of parallel compaction tasks is limited by:
  // - #evacuation pages
  // - (#cores - 1)
  // - a hard limit
  const double kTargetCompactionTimeInMs = 1;
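The concrete computation sits in the elided lines, but the comment fixes its shape: choose enough tasks that the marked live memory can be evacuated within the roughly 1 ms target at the profiled speed, then clamp. A hedged sketch with hypothetical inputs and formula (only the clamping factors are taken from the comment above):

#include <algorithm>
#include <cstdint>

int EstimateCompactionTasks(int64_t live_bytes, double speed_bytes_per_ms,
                            int evacuation_pages, int cores, int hard_limit) {
  const double kTargetCompactionTimeInMs = 1;
  // Enough tasks that live_bytes / (tasks * speed) stays near the target.
  int tasks = 1 + static_cast<int>(live_bytes / (kTargetCompactionTimeInMs *
                                                 speed_bytes_per_ms));
  tasks = std::min(tasks, evacuation_pages);  // at most one task per page
  tasks = std::min(tasks, cores - 1);         // leave a core for the mutator
  tasks = std::min(tasks, hard_limit);        // absolute cap
  return std::max(tasks, 1);                  // never fewer than one task
}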
(...skipping 144 matching lines...)
      pending_compaction_tasks_semaphore_.Wait();
    }
  }
  compaction_in_progress_ = false;
}


void MarkCompactCollector::EvacuatePages(
    CompactionSpaceCollection* compaction_spaces,
    SlotsBuffer** evacuation_slots_buffer) {
+  EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces,
+                                  evacuation_slots_buffer);
  for (int i = 0; i < evacuation_candidates_.length(); i++) {
    Page* p = evacuation_candidates_[i];
    DCHECK(p->IsEvacuationCandidate() ||
           p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
    DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) ==
           MemoryChunk::kSweepingDone);
    if (p->parallel_compaction_state().TrySetValue(
            MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
      if (p->IsEvacuationCandidate()) {
        DCHECK_EQ(p->parallel_compaction_state().Value(),
                  MemoryChunk::kCompactingInProgress);
        double start = heap()->MonotonicallyIncreasingTimeInMs();
        intptr_t live_bytes = p->LiveBytes();
-        if (EvacuateLiveObjectsFromPage(
-                p, compaction_spaces->Get(p->owner()->identity()),
-                evacuation_slots_buffer)) {
+        AlwaysAllocateScope always_allocate(isolate());
+        if (IterateLiveObjectsOnPage(p, &visitor, kClearMarkbits)) {

  Michael Lippautz 2015/12/04 11:29:35: I am open for suggestions here: - I'd rather not a
  Hannes Payer (out of office) 2015/12/04 12:00:41: Acknowledged.

          p->parallel_compaction_state().SetValue(
              MemoryChunk::kCompactingFinalize);
          compaction_spaces->ReportCompactionProgress(
              heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes);
        } else {
          p->parallel_compaction_state().SetValue(
              MemoryChunk::kCompactingAborted);
        }
+        p->ResetLiveBytes();

  Michael Lippautz 2015/12/04 11:29:35: Slight inconsistency in the old refactoring. We al
  Hannes Payer (out of office) 2015/12/04 12:00:41: Careful here, he still want to account for live ob
  Michael Lippautz 2015/12/04 13:29:49: Done.

      } else {
        // There could be popular pages in the list of evacuation candidates
        // which we do not compact.
        p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
      }
    }
  }
}
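EvacuatePages() runs concurrently on several compaction tasks, so each candidate page is claimed with an atomic compare-and-swap (TrySetValue) before any object moves, and is afterwards marked kCompactingFinalize or kCompactingAborted depending on whether IterateLiveObjectsOnPage() completed. A hedged standard-C++ sketch of that hand-off (the enum mirrors the MemoryChunk states above; the helper itself is illustrative, not V8 code):

#include <atomic>

enum CompactionState {
  kCompactingDone,        // unclaimed, or compaction finished
  kCompactingInProgress,  // exactly one task is evacuating the page
  kCompactingFinalize,    // all objects migrated; finalize the page later
  kCompactingAborted      // an allocation failed; mark bits were restored
};

bool TryClaimAndEvacuate(std::atomic<CompactionState>* state,
                         bool evacuation_succeeded) {
  CompactionState expected = kCompactingDone;
  // Losing the CAS means another task already owns this page.
  if (!state->compare_exchange_strong(expected, kCompactingInProgress)) {
    return false;
  }
  state->store(evacuation_succeeded ? kCompactingFinalize
                                    : kCompactingAborted);
  return true;
}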
(...skipping 743 matching lines...)
    MarkBit mark_bit = Marking::MarkBitFrom(host);
    if (Marking::IsBlack(mark_bit)) {
      RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
      RecordRelocSlot(&rinfo, target);
    }
  }
}

}  // namespace internal
}  // namespace v8