Chromium Code Reviews

Unified Diff: src/heap/mark-compact.cc

Issue 1470253002: [heap] Refactor evacuation for young and old gen into visitors. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/mark-compact.h"

 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/sys-info.h"
 #include "src/code-stubs.h"
(...skipping 218 matching lines...)
   VerifyEvacuation(heap, heap->map_space());
   VerifyEvacuation(heap->new_space());

   VerifyEvacuationVisitor visitor;
   heap->IterateStrongRoots(&visitor, VISIT_ALL);
 }
 #endif  // VERIFY_HEAP


 void MarkCompactCollector::SetUp() {
+  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
   free_list_old_space_.Reset(new FreeList(heap_->old_space()));
   free_list_code_space_.Reset(new FreeList(heap_->code_space()));
   free_list_map_space_.Reset(new FreeList(heap_->map_space()));
   EnsureMarkingDequeIsReserved();
   EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
   slots_buffer_allocator_ = new SlotsBufferAllocator();
 }


 void MarkCompactCollector::TearDown() {
(...skipping 1288 matching lines...)
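For context on the DCHECKs added to SetUp() above: the collector encodes each object's marking state in two adjacent mark bits, and the grey-object scan resumed below walks a cell two bits at a time. A minimal standalone sketch of that encoding (the LSB-first packing and the constant names are assumptions for illustration, not V8's Marking helpers):

#include <cstdint>
#include <cstdio>

// Two mark bits per object, packed into a 32-bit cell (LSB-first pairs here;
// the exact bit order is a simplifying assumption). Patterns mirror the
// DCHECKs above: white = 00, black = 10, grey = 11, impossible = 01.
constexpr uint32_t kWhite = 0x0;  // unmarked
constexpr uint32_t kBlack = 0x1;  // marked and scanned
constexpr uint32_t kGrey = 0x3;   // marked, not yet scanned

void PrintGreyPairs(uint32_t cell) {
  for (int pair = 0; pair < 16; pair++) {
    if (((cell >> (2 * pair)) & 0x3) == kGrey)
      std::printf("grey object at pair %d\n", pair);
  }
}

int main() {
  // Pair 0 black, pair 1 grey, pair 2 white.
  uint32_t cell = kBlack | (kGrey << 2) | (kWhite << 4);
  PrintGreyPairs(cell);  // prints: grey object at pair 1
}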
       if (marking_deque()->IsFull()) return;
       offset += 2;
       grey_objects >>= 2;
     }

     grey_objects >>= (Bitmap::kBitsPerCell - 1);
   }
 }


-int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
-    NewSpace* new_space, NewSpacePage* p) {
-  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
-  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
-  MarkBit::CellType* cells = p->markbits()->cells();
-  int survivors_size = 0;
-
-  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
-    Address cell_base = it.CurrentCellBase();
-    MarkBit::CellType* cell = it.CurrentCell();
-
-    MarkBit::CellType current_cell = *cell;
-    if (current_cell == 0) continue;
-
-    int offset = 0;
-    while (current_cell != 0) {
-      int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
-      current_cell >>= trailing_zeros;
-      offset += trailing_zeros;
-      Address address = cell_base + offset * kPointerSize;
-      HeapObject* object = HeapObject::FromAddress(address);
-      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
-      int size = object->Size();
-      survivors_size += size;
-
-      Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
-
-      offset += 2;
-      current_cell >>= 2;
-
-      // TODO(hpayer): Refactor EvacuateObject and call this function instead.
-      if (heap()->ShouldBePromoted(object->address(), size) &&
-          TryPromoteObject(object, size)) {
-        continue;
-      }
-
-      AllocationAlignment alignment = object->RequiredAlignment();
-      AllocationResult allocation = new_space->AllocateRaw(size, alignment);
-      if (allocation.IsRetry()) {
-        if (!new_space->AddFreshPage()) {
-          // Shouldn't happen. We are sweeping linearly, and to-space
-          // has the same number of pages as from-space, so there is
-          // always room unless we are in an OOM situation.
-          FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
-        }
-        allocation = new_space->AllocateRaw(size, alignment);
-        DCHECK(!allocation.IsRetry());
-      }
-      Object* target = allocation.ToObjectChecked();
-
-      MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
-      if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
-        heap()->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
-      }
-      heap()->IncrementSemiSpaceCopiedObjectSize(size);
-    }
-    *cells = 0;
-  }
-  return survivors_size;
-}
+class MarkCompactCollector::HeapObjectVisitor {
+ public:
+  virtual ~HeapObjectVisitor() {}
+  virtual bool Visit(HeapObject* object) = 0;
+};
+
+
+class MarkCompactCollector::EvacuateNewSpaceVisitor
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  explicit EvacuateNewSpaceVisitor(Heap* heap) : heap_(heap) {}
+
+  virtual bool Visit(HeapObject* object) {
+    Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
+    int size = object->Size();
+
+    // TODO(hpayer): Refactor EvacuateObject and call this function instead.
+    if (heap_->ShouldBePromoted(object->address(), size) &&
+        heap_->mark_compact_collector()->TryPromoteObject(object, size)) {
+      return true;
+    }
+
+    AllocationAlignment alignment = object->RequiredAlignment();
+    AllocationResult allocation =
+        heap_->new_space()->AllocateRaw(size, alignment);
+    if (allocation.IsRetry()) {
+      if (!heap_->new_space()->AddFreshPage()) {
+        // Shouldn't happen. We are sweeping linearly, and to-space
+        // has the same number of pages as from-space, so there is
+        // always room unless we are in an OOM situation.
+        FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
+      }
+      allocation = heap_->new_space()->AllocateRaw(size, alignment);
+      DCHECK(!allocation.IsRetry());
+    }
+    Object* target = allocation.ToObjectChecked();
+
+    heap_->mark_compact_collector()->MigrateObject(
+        HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
+    if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
+      heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
+    }
+    heap_->IncrementSemiSpaceCopiedObjectSize(size);
+    return true;
+  }
+
+ private:
+  Heap* heap_;
+};
+
+
+class MarkCompactCollector::EvacuateOldSpaceVisitor
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  EvacuateOldSpaceVisitor(Heap* heap,
+                          CompactionSpaceCollection* compaction_spaces,
+                          SlotsBuffer** evacuation_slots_buffer)
+      : heap_(heap),
+        compaction_spaces_(compaction_spaces),
+        evacuation_slots_buffer_(evacuation_slots_buffer),
+        aborted_(false) {}
+
+  virtual bool Visit(HeapObject* object) {
+    int size = object->Size();
+    AllocationAlignment alignment = object->RequiredAlignment();
+    HeapObject* target_object = nullptr;
+    AllocationSpace id =
+        Page::FromAddress(object->address())->owner()->identity();
+    AllocationResult allocation =
+        compaction_spaces_->Get(id)->AllocateRaw(size, alignment);
+    if (!allocation.To(&target_object)) {
+      aborted_ = true;
+      return false;
+    }
+    heap_->mark_compact_collector()->MigrateObject(
+        target_object, object, size, id, evacuation_slots_buffer_);
+    DCHECK(object->map_word().IsForwardingAddress());
+    return true;
+  }
+
+  bool aborted() { return aborted_; }
+  void reset_aborted() { aborted_ = false; }
+
+ private:
+  Heap* heap_;
+  CompactionSpaceCollection* compaction_spaces_;
+  SlotsBuffer** evacuation_slots_buffer_;
+  bool aborted_;
+};
+
+
+void MarkCompactCollector::IterateLiveObjectsOnPage(MemoryChunk* page,
+                                                    HeapObjectVisitor* visitor,
+                                                    IterationMode mode) {

    Hannes Payer (out of office) 2015/11/24 16:07:35
    If this method returns a bool indicating if all ob…

    Michael Lippautz 2015/11/24 19:19:50
    Done. Also added a comment in the .h file and a fu…

+  int offsets[16];
+  for (MarkBitCellIterator it(page); !it.Done(); it.Advance()) {
+    Address cell_base = it.CurrentCellBase();
+    MarkBit::CellType* cell = it.CurrentCell();
+
+    if (*cell == 0) continue;
+
+    int live_objects = MarkWordToObjectStarts(*cell, offsets);
+    for (int i = 0; i < live_objects; i++) {
+      Address object_addr = cell_base + offsets[i] * kPointerSize;
+      HeapObject* object = HeapObject::FromAddress(object_addr);
+      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+      if (!visitor->Visit(object)) {
+        if ((mode == kClearMarkbits) && (i > 0)) {
+          page->markbits()->ClearRange(
+              page->AddressToMarkbitIndex(page->area_start()),
+              page->AddressToMarkbitIndex(object_addr));
+        }
+        return;
+      }
+    }
+    if (mode == kClearMarkbits) {
+      *cell = 0;
+    }
+  }
+}


 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
   PageIterator it(space);
   while (it.has_next()) {
     Page* p = it.next();
     DiscoverGreyObjectsOnPage(p);
     if (marking_deque()->IsFull()) return;
   }
(...skipping 1460 matching lines...)
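The refactor above replaces two near-duplicate evacuation loops with a single page walker parameterized by a HeapObjectVisitor. A minimal sketch of the pattern, using hypothetical stand-in types rather than the V8 classes:

#include <vector>

struct Object { int size; };

class ObjectVisitor {
 public:
  virtual ~ObjectVisitor() = default;
  // Returning false aborts the walk (e.g. on allocation failure),
  // mirroring HeapObjectVisitor::Visit in this CL.
  virtual bool Visit(Object* object) = 0;
};

// One iteration routine serves both young- and old-space evacuation;
// the policy differences live entirely in the visitor.
bool IterateLiveObjects(const std::vector<Object*>& live,
                        ObjectVisitor* visitor) {
  for (Object* o : live) {
    if (!visitor->Visit(o)) return false;  // caller handles rollback
  }
  return true;
}

The early return on a false Visit() is what lets old-space evacuation abort a page midway, while new-space evacuation, whose visitor always returns true, runs to completion.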
   new_space->Flip();
   new_space->ResetAllocationInfo();

   int survivors_size = 0;

   // First pass: traverse all objects in inactive semispace, remove marks,
   // migrate live objects and write forwarding addresses. This stage puts
   // new entries in the store buffer and may cause some pages to be marked
   // scan-on-scavenge.
   NewSpacePageIterator it(from_bottom, from_top);
+  EvacuateNewSpaceVisitor new_space_visitor(heap());
   while (it.has_next()) {
     NewSpacePage* p = it.next();
-    survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
+    survivors_size += p->LiveBytes();
+    IterateLiveObjectsOnPage(p, &new_space_visitor, kClearMarkbits);
   }

   heap_->IncrementYoungSurvivorsCounter(survivors_size);
   new_space->set_age_mark(new_space->top());
 }


 void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
     SlotsBuffer* evacuation_slots_buffer) {
   base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_);
   evacuation_slots_buffers_.Add(evacuation_slots_buffer);
 }

-bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
-    Page* p, PagedSpace* target_space, SlotsBuffer** evacuation_slots_buffer) {
-  AlwaysAllocateScope always_allocate(isolate());
-  DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
-
-  int offsets[16];
-  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
-    Address cell_base = it.CurrentCellBase();
-    MarkBit::CellType* cell = it.CurrentCell();
-
-    if (*cell == 0) continue;
-
-    int live_objects = MarkWordToObjectStarts(*cell, offsets);
-    for (int i = 0; i < live_objects; i++) {
-      Address object_addr = cell_base + offsets[i] * kPointerSize;
-      HeapObject* object = HeapObject::FromAddress(object_addr);
-      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
-      int size = object->Size();
-      AllocationAlignment alignment = object->RequiredAlignment();
-      HeapObject* target_object = nullptr;
-      AllocationResult allocation = target_space->AllocateRaw(size, alignment);
-      if (!allocation.To(&target_object)) {
-        // We need to abort compaction for this page. Make sure that we reset
-        // the mark bits for objects that have already been migrated.
-        if (i > 0) {
-          p->markbits()->ClearRange(p->AddressToMarkbitIndex(p->area_start()),
-                                    p->AddressToMarkbitIndex(object_addr));
-        }
-        return false;
-      }
-
-      MigrateObject(target_object, object, size, target_space->identity(),
-                    evacuation_slots_buffer);
-      DCHECK(object->map_word().IsForwardingAddress());
-    }
-
-    // Clear marking bits for current cell.
-    *cell = 0;
-  }
-  p->ResetLiveBytes();
-  return true;
-}
-
-
 int MarkCompactCollector::NumberOfParallelCompactionTasks() {
   if (!FLAG_parallel_compaction) return 1;
   // Compute the number of needed tasks based on a target compaction time, the
   // profiled compaction speed and marked live memory.
   //
   // The number of parallel compaction tasks is limited by:
   // - #evacuation pages
   // - (#cores - 1)
   // - a hard limit
   const double kTargetCompactionTimeInMs = 1;
(...skipping 144 matching lines...)
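A worked sketch of the task-count heuristic described in the comment above; apart from kTargetCompactionTimeInMs, the constants, names, and clamping order are assumptions, since the function body is elided here:

#include <algorithm>
#include <cstdint>

int EstimateCompactionTasks(int64_t live_bytes, int64_t speed_bytes_per_ms,
                            int evacuation_pages, int cores) {
  const double kTargetCompactionTimeInMs = 1;
  const int kMaxCompactionTasks = 8;  // hypothetical hard limit
  // Enough tasks that each one's share of live memory can be copied within
  // the target time at the profiled speed.
  int tasks = static_cast<int>(
      live_bytes / (kTargetCompactionTimeInMs * speed_bytes_per_ms) + 1);
  tasks = std::min(tasks, evacuation_pages);        // at most one task per page
  tasks = std::min(tasks, std::max(1, cores - 1));  // leave one core free
  return std::max(1, std::min(tasks, kMaxCompactionTasks));
}

For example, with 4 MB of live memory, a profiled speed of 1 MB/ms, 16 candidate pages, and 8 cores, this yields min(5, 16, 7) = 5 tasks.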
       pending_compaction_tasks_semaphore_.Wait();
     }
   }
   compaction_in_progress_ = false;
 }

 void MarkCompactCollector::EvacuatePages(
     CompactionSpaceCollection* compaction_spaces,
     SlotsBuffer** evacuation_slots_buffer) {
+  EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces,
+                                  evacuation_slots_buffer);
   for (int i = 0; i < evacuation_candidates_.length(); i++) {
     Page* p = evacuation_candidates_[i];
     DCHECK(p->IsEvacuationCandidate() ||
            p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
     DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) ==
            MemoryChunk::kSweepingDone);
     if (p->parallel_compaction_state().TrySetValue(
             MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
       if (p->IsEvacuationCandidate()) {
         DCHECK_EQ(p->parallel_compaction_state().Value(),
                   MemoryChunk::kCompactingInProgress);
         double start = heap()->MonotonicallyIncreasingTimeInMs();
         intptr_t live_bytes = p->LiveBytes();
-        if (EvacuateLiveObjectsFromPage(
-                p, compaction_spaces->Get(p->owner()->identity()),
-                evacuation_slots_buffer)) {
+        IterateLiveObjectsOnPage(p, &visitor, kClearMarkbits);
+        if (!visitor.aborted()) {
+          p->ResetLiveBytes();
           p->parallel_compaction_state().SetValue(
               MemoryChunk::kCompactingFinalize);
           compaction_spaces->ReportCompactionProgress(
               heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes);
         } else {
           p->parallel_compaction_state().SetValue(
               MemoryChunk::kCompactingAborted);
+          visitor.reset_aborted();
         }
       } else {
         // There could be popular pages in the list of evacuation candidates
         // which we do not compact.
         p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
       }
     }
   }
 }

(...skipping 1138 matching lines...)
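On the abort path above, objects visited before the failing allocation have already been migrated, so IterateLiveObjectsOnPage clears their mark bits (ClearRange from the page start up to the failing object) and the page is marked kCompactingAborted for a later rescan. A toy sketch of that rollback invariant, with a hypothetical page type:

#include <cstddef>
#include <vector>

struct ToyPage {
  std::vector<bool> markbits;  // one mark per object slot, simplified
};

// Objects at indices [0, failed_index) were migrated before the abort;
// clearing their marks means a later rescan of this page will not treat
// the stale copies as live, analogous to the ClearRange call above.
void RollbackMarks(ToyPage* page, size_t failed_index) {
  for (size_t i = 0; i < failed_index; i++) page->markbits[i] = false;
}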
     MarkBit mark_bit = Marking::MarkBitFrom(host);
     if (Marking::IsBlack(mark_bit)) {
       RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
       RecordRelocSlot(&rinfo, target);
     }
   }
 }

 }  // namespace internal
 }  // namespace v8
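Reusing the hypothetical Object/ObjectVisitor/IterateLiveObjects types from the earlier sketch, a usage example of the visitor hook this CL introduces: construct one visitor, then drive it across pages, the way EvacuateNewSpace builds a single EvacuateNewSpaceVisitor for the whole semispace:

#include <cstdio>
#include <vector>

// Requires Object, ObjectVisitor, and IterateLiveObjects from the earlier
// sketch; SizeTally is a hypothetical visitor, not part of the CL.
class SizeTally : public ObjectVisitor {
 public:
  bool Visit(Object* object) override {
    total_ += object->size;
    return true;  // a tallying pass never aborts
  }
  int total() const { return total_; }

 private:
  int total_ = 0;
};

int main() {
  Object a{16}, b{32};
  std::vector<Object*> live = {&a, &b};
  SizeTally tally;
  IterateLiveObjects(live, &tally);
  std::printf("live bytes: %d\n", tally.total());  // prints: live bytes: 48
}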