Chromium Code Reviews

Diff: src/heap/mark-compact.cc

Issue 1470253002: [heap] Refactor evacuation for young and old gen into visitors. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: don't check for WAS_SWEPT, as we never sweep newspace pages (created 5 years ago)
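At a glance, the refactor replaces two near-duplicate per-page evacuation loops with a single iteration primitive parameterized by a visitor. A minimal sketch of the interface, simplified from the new code in the diff below (only the names shown in the diff are real; the explanatory comments are mine):

    class HeapObject;  // V8's heap object type, assumed here

    // Hedged sketch of the interface this patch introduces.
    class HeapObjectVisitor {
     public:
      virtual ~HeapObjectVisitor() {}
      // Returning false signals that the object could not be processed
      // (for old-space evacuation: the target space is full), which makes
      // the iteration over the current page abort and report failure.
      virtual bool Visit(HeapObject* object) = 0;
    };

In the patch, EvacuateNewSpaceVisitor and EvacuateOldSpaceVisitor implement this interface with the young- and old-generation policies, and IterateLiveObjectsOnPage feeds every black object on a page to whichever visitor it is given.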
  // Copyright 2012 the V8 project authors. All rights reserved.
  // Use of this source code is governed by a BSD-style license that can be
  // found in the LICENSE file.

  #include "src/heap/mark-compact.h"

  #include "src/base/atomicops.h"
  #include "src/base/bits.h"
  #include "src/base/sys-info.h"
  #include "src/code-stubs.h"
(...skipping 218 matching lines...)
    VerifyEvacuation(heap, heap->map_space());
    VerifyEvacuation(heap->new_space());

    VerifyEvacuationVisitor visitor;
    heap->IterateStrongRoots(&visitor, VISIT_ALL);
  }
  #endif  // VERIFY_HEAP


  void MarkCompactCollector::SetUp() {
+   DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+   DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+   DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+   DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
    free_list_old_space_.Reset(new FreeList(heap_->old_space()));
    free_list_code_space_.Reset(new FreeList(heap_->code_space()));
    free_list_map_space_.Reset(new FreeList(heap_->map_space()));
    EnsureMarkingDequeIsReserved();
    EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
    slots_buffer_allocator_ = new SlotsBufferAllocator();
  }


  void MarkCompactCollector::TearDown() {
(...skipping 1243 matching lines...)
        if (marking_deque()->IsFull()) return;
        offset += 2;
        grey_objects >>= 2;
      }

      grey_objects >>= (Bitmap::kBitsPerCell - 1);
    }
  }


- int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
-     NewSpace* new_space, NewSpacePage* p) {
-   DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-   DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
-   DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
-   DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
-   MarkBit::CellType* cells = p->markbits()->cells();
-   int survivors_size = 0;
-
-   for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
+ class MarkCompactCollector::HeapObjectVisitor {
+  public:
+   virtual ~HeapObjectVisitor() {}
+   virtual bool Visit(HeapObject* object) = 0;
+ };
+
+
+ class MarkCompactCollector::EvacuateNewSpaceVisitor
+     : public MarkCompactCollector::HeapObjectVisitor {
+  public:
+   explicit EvacuateNewSpaceVisitor(Heap* heap) : heap_(heap) {}
+
+   virtual bool Visit(HeapObject* object) {
+     Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
+     int size = object->Size();
+
+     // TODO(hpayer): Refactor EvacuateObject and call this function instead.
+     if (heap_->ShouldBePromoted(object->address(), size) &&
+         heap_->mark_compact_collector()->TryPromoteObject(object, size)) {
+       return true;
+     }
+
+     AllocationAlignment alignment = object->RequiredAlignment();
+     AllocationResult allocation =
+         heap_->new_space()->AllocateRaw(size, alignment);
+     if (allocation.IsRetry()) {
+       if (!heap_->new_space()->AddFreshPage()) {
+         // Shouldn't happen. We are sweeping linearly, and to-space
+         // has the same number of pages as from-space, so there is
+         // always room unless we are in an OOM situation.
+         FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
+       }
+       allocation = heap_->new_space()->AllocateRaw(size, alignment);
+       DCHECK(!allocation.IsRetry());
+     }
+     Object* target = allocation.ToObjectChecked();
+
+     heap_->mark_compact_collector()->MigrateObject(
+         HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
+     if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
+       heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
+     }
+     heap_->IncrementSemiSpaceCopiedObjectSize(size);
+     return true;
+   }
+
+  private:
+   Heap* heap_;
+ };
+
+
+ class MarkCompactCollector::EvacuateOldSpaceVisitor
+     : public MarkCompactCollector::HeapObjectVisitor {
+  public:
+   EvacuateOldSpaceVisitor(Heap* heap,
+                           CompactionSpaceCollection* compaction_spaces,
+                           SlotsBuffer** evacuation_slots_buffer)
+       : heap_(heap),
+         compaction_spaces_(compaction_spaces),
+         evacuation_slots_buffer_(evacuation_slots_buffer) {}
+
+   virtual bool Visit(HeapObject* object) {
+     int size = object->Size();
+     AllocationAlignment alignment = object->RequiredAlignment();
+     HeapObject* target_object = nullptr;
+     AllocationSpace id =
+         Page::FromAddress(object->address())->owner()->identity();
+     AllocationResult allocation =
+         compaction_spaces_->Get(id)->AllocateRaw(size, alignment);
+     if (!allocation.To(&target_object)) {
+       return false;
+     }
+     heap_->mark_compact_collector()->MigrateObject(
+         target_object, object, size, id, evacuation_slots_buffer_);
+     DCHECK(object->map_word().IsForwardingAddress());
+     return true;
+   }
+
+  private:
+   Heap* heap_;
+   CompactionSpaceCollection* compaction_spaces_;
+   SlotsBuffer** evacuation_slots_buffer_;
+ };
+
+
+ bool MarkCompactCollector::IterateLiveObjectsOnPage(MemoryChunk* page,
+                                                     HeapObjectVisitor* visitor,
+                                                     IterationMode mode) {
+   Address offsets[16];
+   for (MarkBitCellIterator it(page); !it.Done(); it.Advance()) {
      Address cell_base = it.CurrentCellBase();
      MarkBit::CellType* cell = it.CurrentCell();

-     MarkBit::CellType current_cell = *cell;
-     if (current_cell == 0) continue;
+     if (*cell == 0) continue;

-     int offset = 0;
-     while (current_cell != 0) {
-       int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
-       current_cell >>= trailing_zeros;
-       offset += trailing_zeros;
-       Address address = cell_base + offset * kPointerSize;
-       HeapObject* object = HeapObject::FromAddress(address);
+     int live_objects = MarkWordToObjectStarts(*cell, cell_base, offsets);
+     for (int i = 0; i < live_objects; i++) {
+       HeapObject* object = HeapObject::FromAddress(offsets[i]);
        DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
-       int size = object->Size();
-       survivors_size += size;
-
-       Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
-
-       offset += 2;
-       current_cell >>= 2;
-
-       // TODO(hpayer): Refactor EvacuateObject and call this function instead.
-       if (heap()->ShouldBePromoted(object->address(), size) &&
-           TryPromoteObject(object, size)) {
-         continue;
+       if (!visitor->Visit(object)) {
+         if ((mode == kClearMarkbits) && (i > 0)) {
+           page->markbits()->ClearRange(
+               page->AddressToMarkbitIndex(page->area_start()),
+               page->AddressToMarkbitIndex(offsets[i]));
+         }
+         return false;
        }
-
-       AllocationAlignment alignment = object->RequiredAlignment();
-       AllocationResult allocation = new_space->AllocateRaw(size, alignment);
-       if (allocation.IsRetry()) {
-         if (!new_space->AddFreshPage()) {
-           // Shouldn't happen. We are sweeping linearly, and to-space
-           // has the same number of pages as from-space, so there is
-           // always room unless we are in an OOM situation.
-           FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
-         }
-         allocation = new_space->AllocateRaw(size, alignment);
-         DCHECK(!allocation.IsRetry());
-       }
-       Object* target = allocation.ToObjectChecked();
-
-       MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
-       if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
-         heap()->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
-       }
-       heap()->IncrementSemiSpaceCopiedObjectSize(size);
      }
-     *cells = 0;
+     if (mode == kClearMarkbits) {
+       *cell = 0;
+     }
    }
-   return survivors_size;
+   return true;
  }


  void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
    PageIterator it(space);
    while (it.has_next()) {
      Page* p = it.next();
      DiscoverGreyObjectsOnPage(p);
      if (marking_deque()->IsFull()) return;
    }
(...skipping 1500 matching lines...)
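The DCHECKs moved into SetUp() pin down the two-bit colour encoding that the loops above rely on: white = 00, black = 10, grey = 11, and 01 cannot occur. A hedged sketch of how the removed DiscoverAndEvacuateBlackObjectsOnPage loop decoded object starts from one bitmap cell (simplified; __builtin_ctz stands in for base::bits::CountTrailingZeros32, and the function name is mine):

    #include <stdint.h>

    // Sketch only: walk one 32-bit mark bitmap cell and compute the address
    // of every marked object start. Each bit covers one pointer-sized word;
    // an object's colour occupies a pair of bits, so we skip two bits per hit.
    void WalkMarkedObjectStarts(uint32_t cell, uintptr_t cell_base) {
      int offset = 0;
      while (cell != 0) {
        int trailing_zeros = __builtin_ctz(cell);  // skip unmarked words
        cell >>= trailing_zeros;
        offset += trailing_zeros;
        uintptr_t address = cell_base + offset * sizeof(void*);
        (void)address;  // a black/grey object starts here; visit it
        offset += 2;    // advance past the two-bit colour pair
        cell >>= 2;
      }
    }

The new MarkWordToObjectStarts call in IterateLiveObjectsOnPage performs essentially this decoding, collecting the starts into a small array before visiting them.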
    new_space->Flip();
    new_space->ResetAllocationInfo();

    int survivors_size = 0;

    // First pass: traverse all objects in inactive semispace, remove marks,
    // migrate live objects and write forwarding addresses. This stage puts
    // new entries in the store buffer and may cause some pages to be marked
    // scan-on-scavenge.
    NewSpacePageIterator it(from_bottom, from_top);
+   EvacuateNewSpaceVisitor new_space_visitor(heap());
    while (it.has_next()) {
      NewSpacePage* p = it.next();
-     survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
+     survivors_size += p->LiveBytes();
+     bool ok = IterateLiveObjectsOnPage(p, &new_space_visitor, kClearMarkbits);
+     USE(ok);
+     DCHECK(ok);
    }

    heap_->IncrementYoungSurvivorsCounter(survivors_size);
    new_space->set_age_mark(new_space->top());
  }


  void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
      SlotsBuffer* evacuation_slots_buffer) {
    base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_);
    evacuation_slots_buffers_.Add(evacuation_slots_buffer);
  }


- bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
-     Page* p, PagedSpace* target_space, SlotsBuffer** evacuation_slots_buffer) {
-   AlwaysAllocateScope always_allocate(isolate());
-   DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
-
-   Address starts[16];
-   for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
-     Address cell_base = it.CurrentCellBase();
-     MarkBit::CellType* cell = it.CurrentCell();
-
-     if (*cell == 0) continue;
-
-     int live_objects = MarkWordToObjectStarts(*cell, cell_base, starts);
-     for (int i = 0; i < live_objects; i++) {
-       HeapObject* object = HeapObject::FromAddress(starts[i]);
-       DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
-       int size = object->Size();
-       AllocationAlignment alignment = object->RequiredAlignment();
-       HeapObject* target_object = nullptr;
-       AllocationResult allocation = target_space->AllocateRaw(size, alignment);
-       if (!allocation.To(&target_object)) {
-         // We need to abort compaction for this page. Make sure that we reset
-         // the mark bits for objects that have already been migrated.
-         if (i > 0) {
-           p->markbits()->ClearRange(p->AddressToMarkbitIndex(p->area_start()),
-                                     p->AddressToMarkbitIndex(starts[i]));
-         }
-         return false;
-       }
-
-       MigrateObject(target_object, object, size, target_space->identity(),
-                     evacuation_slots_buffer);
-       DCHECK(object->map_word().IsForwardingAddress());
-     }
-
-     // Clear marking bits for current cell.
-     *cell = 0;
-   }
-   p->ResetLiveBytes();
-   return true;
- }
-
-
  int MarkCompactCollector::NumberOfParallelCompactionTasks() {
    if (!FLAG_parallel_compaction) return 1;
    // Compute the number of needed tasks based on a target compaction time, the
    // profiled compaction speed and marked live memory.
    //
    // The number of parallel compaction tasks is limited by:
    // - #evacuation pages
    // - (#cores - 1)
    // - a hard limit
    const double kTargetCompactionTimeInMs = 1;
(...skipping 144 matching lines...)
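The body of NumberOfParallelCompactionTasks is elided by the skip above; a hedged reconstruction of the arithmetic its comment describes (the function name, hard limit, and units here are assumptions for illustration, not V8's actual values):

    #include <algorithm>

    // Illustrative only: how many tasks are needed so that evacuating
    // live_bytes at the profiled speed fits into the target time, clamped
    // by the three limits listed in the comment above.
    int EstimateCompactionTasks(int evacuation_pages, int cores,
                                double live_bytes, double speed_bytes_per_ms) {
      const double kTargetCompactionTimeInMs = 1;
      const int kMaxCompactionTasks = 8;  // assumed hard limit
      int wanted = 1 + static_cast<int>(
          live_bytes / (speed_bytes_per_ms * kTargetCompactionTimeInMs));
      return std::max(1, std::min({wanted, evacuation_pages, cores - 1,
                                   kMaxCompactionTasks}));
    }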
        pending_compaction_tasks_semaphore_.Wait();
      }
    }
    compaction_in_progress_ = false;
  }


  void MarkCompactCollector::EvacuatePages(
      CompactionSpaceCollection* compaction_spaces,
      SlotsBuffer** evacuation_slots_buffer) {
+   EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces,
+                                   evacuation_slots_buffer);
    for (int i = 0; i < evacuation_candidates_.length(); i++) {
      Page* p = evacuation_candidates_[i];
      DCHECK(p->IsEvacuationCandidate() ||
             p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
      DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) ==
             MemoryChunk::kSweepingDone);
      if (p->parallel_compaction_state().TrySetValue(
              MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
        if (p->IsEvacuationCandidate()) {
          DCHECK_EQ(p->parallel_compaction_state().Value(),
                    MemoryChunk::kCompactingInProgress);
          double start = heap()->MonotonicallyIncreasingTimeInMs();
          intptr_t live_bytes = p->LiveBytes();
-         if (EvacuateLiveObjectsFromPage(
-                 p, compaction_spaces->Get(p->owner()->identity()),
-                 evacuation_slots_buffer)) {
+         if (IterateLiveObjectsOnPage(p, &visitor, kClearMarkbits)) {
+           p->ResetLiveBytes();
            p->parallel_compaction_state().SetValue(
                MemoryChunk::kCompactingFinalize);
            compaction_spaces->ReportCompactionProgress(
                heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes);
          } else {
            p->parallel_compaction_state().SetValue(
                MemoryChunk::kCompactingAborted);
          }
        } else {
          // There could be popular pages in the list of evacuation candidates
(...skipping 770 matching lines...)
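EvacuatePages claims each candidate page with TrySetValue, a compare-and-swap on the page's parallel compaction state, so that concurrent compaction tasks process every page exactly once. A simplified model of that claiming protocol using std::atomic (V8 uses its own AtomicValue wrapper; all names here are illustrative):

    #include <atomic>
    #include <functional>

    enum CompactingState { kCompactingDone, kCompactingInProgress,
                           kCompactingFinalize, kCompactingAborted };

    // Sketch only: a task claims the page by CAS-ing kCompactingDone to
    // kCompactingInProgress; losers skip the page. The winner records
    // success (finalize) or failure (aborted, e.g. the target space
    // filled up and the page must be rescanned).
    void ClaimAndEvacuate(std::atomic<CompactingState>& state,
                          const std::function<bool()>& evacuate_page) {
      CompactingState expected = kCompactingDone;
      if (!state.compare_exchange_strong(expected, kCompactingInProgress)) {
        return;  // another task already claimed this page
      }
      state.store(evacuate_page() ? kCompactingFinalize : kCompactingAborted);
    }

The aborted state feeds back into the visitor contract: EvacuateOldSpaceVisitor::Visit returning false is what makes IterateLiveObjectsOnPage report failure for the page.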
      MarkBit mark_bit = Marking::MarkBitFrom(host);
      if (Marking::IsBlack(mark_bit)) {
        RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
        RecordRelocSlot(&rinfo, target);
      }
    }
  }

  }  // namespace internal
  }  // namespace v8
