Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
| 6 | 6 |
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
| 9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
| 10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |

(...skipping 218 matching lines...)

| OLD | NEW |
|---|---|
| 229 VerifyEvacuation(heap, heap->map_space()); | 229 VerifyEvacuation(heap, heap->map_space()); |
| 230 VerifyEvacuation(heap->new_space()); | 230 VerifyEvacuation(heap->new_space()); |
| 231 | 231 |
| 232 VerifyEvacuationVisitor visitor; | 232 VerifyEvacuationVisitor visitor; |
| 233 heap->IterateStrongRoots(&visitor, VISIT_ALL); | 233 heap->IterateStrongRoots(&visitor, VISIT_ALL); |
| 234 } | 234 } |
| 235 #endif // VERIFY_HEAP | 235 #endif // VERIFY_HEAP |
| 236 | 236 |
| 237 | 237 |
| 238 void MarkCompactCollector::SetUp() { | 238 void MarkCompactCollector::SetUp() { |
| | 239 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); |
| | 240 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); |
| | 241 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); |
| | 242 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); |
| | 243 |
| 239 free_list_old_space_.Reset(new FreeList(heap_->old_space())); | 244 free_list_old_space_.Reset(new FreeList(heap_->old_space())); |
| 240 free_list_code_space_.Reset(new FreeList(heap_->code_space())); | 245 free_list_code_space_.Reset(new FreeList(heap_->code_space())); |
| 241 free_list_map_space_.Reset(new FreeList(heap_->map_space())); | 246 free_list_map_space_.Reset(new FreeList(heap_->map_space())); |
| 242 EnsureMarkingDequeIsReserved(); | 247 EnsureMarkingDequeIsReserved(); |
| 243 EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize); | 248 EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize); |
| 244 slots_buffer_allocator_ = new SlotsBufferAllocator(); | 249 slots_buffer_allocator_ = new SlotsBufferAllocator(); |
| 245 } | 250 } |
| 246 | 251 |
| 247 | 252 |
| 248 void MarkCompactCollector::TearDown() { | 253 void MarkCompactCollector::TearDown() { |
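
The DCHECKs added to SetUp() above (new lines 239 to 242) pin down the two-bits-per-object mark encoding that the rest of this CL relies on: white is "00", black "10", grey "11", and "01" can never occur. A minimal sketch of that encoding, with illustrative names (the real constants and predicates live in class Marking in src/heap/mark-compact.h):

```cpp
// Illustrative sketch of the two-bit mark encoding asserted in SetUp().
// Names here are stand-ins; the real definitions live in class Marking.
#include <cstdio>

enum class MarkColor { kWhite, kGrey, kBlack, kImpossible };

// |first| is the object's mark bit, |second| the bit after it; the
// pattern strings read "<first><second>".
MarkColor Classify(bool first, bool second) {
  if (!first && !second) return MarkColor::kWhite;  // "00": unmarked
  if (first && second) return MarkColor::kGrey;     // "11": marked, not yet scanned
  if (first && !second) return MarkColor::kBlack;   // "10": marked and scanned
  return MarkColor::kImpossible;                    // "01": unreachable state
}

int main() {
  bool first = false, second = false;  // white
  first = second = true;               // WhiteToGrey sets both bits
  second = false;                      // GreyToBlack clears the second bit
  std::printf("%d\n", Classify(first, second) == MarkColor::kBlack);  // 1
}
```

Since neither marking transition sets the second bit without the first, "01" is asserted as kImpossibleBitPattern.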

(...skipping 1288 matching lines...)

| OLD | NEW |
|---|---|
| 1537 if (marking_deque()->IsFull()) return; | 1542 if (marking_deque()->IsFull()) return; |
| 1538 offset += 2; | 1543 offset += 2; |
| 1539 grey_objects >>= 2; | 1544 grey_objects >>= 2; |
| 1540 } | 1545 } |
| 1541 | 1546 |
| 1542 grey_objects >>= (Bitmap::kBitsPerCell - 1); | 1547 grey_objects >>= (Bitmap::kBitsPerCell - 1); |
| 1543 } | 1548 } |
| 1544 } | 1549 } |
| 1545 | 1550 |
| 1546 | 1551 |
| 1547 int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage( | 1552 class MarkCompactCollector::HeapObjectVisitor { |
| 1548 NewSpace* new_space, NewSpacePage* p) { | 1553 public: |
| 1549 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); | 1554 virtual ~HeapObjectVisitor() {} |
| 1550 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); | 1555 virtual bool Visit(HeapObject* object) = 0; |
| 1551 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); | 1556 }; |
| 1552 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); | |
| 1553 | 1557 |
| 1554 MarkBit::CellType* cells = p->markbits()->cells(); | |
| 1555 int survivors_size = 0; | |
| 1556 | 1558 |
| 1557 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { | 1559 class MarkCompactCollector::EvacuateNewSpaceVisitor |
| | 1560 : public MarkCompactCollector::HeapObjectVisitor { |
| | 1561 public: |
| | 1562 explicit EvacuateNewSpaceVisitor(Heap* heap) : heap_(heap) {} |
| | 1563 |
| | 1564 virtual bool Visit(HeapObject* object) { |
| | 1565 Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT); |
| | 1566 int size = object->Size(); |
| | 1567 |
| | 1568 // TODO(hpayer): Refactor EvacuateObject and call this function instead. |
| | 1569 if (heap_->ShouldBePromoted(object->address(), size) && |
| | 1570 heap_->mark_compact_collector()->TryPromoteObject(object, size)) { |
| | 1571 return true; |
| | 1572 } |
| | 1573 |
| | 1574 AllocationAlignment alignment = object->RequiredAlignment(); |
| | 1575 AllocationResult allocation = |
| | 1576 heap_->new_space()->AllocateRaw(size, alignment); |
| | 1577 if (allocation.IsRetry()) { |
| | 1578 if (!heap_->new_space()->AddFreshPage()) { |
| | 1579 // Shouldn't happen. We are sweeping linearly, and to-space |
| | 1580 // has the same number of pages as from-space, so there is |
| | 1581 // always room unless we are in an OOM situation. |
| | 1582 FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n"); |
| | 1583 } |
| | 1584 allocation = heap_->new_space()->AllocateRaw(size, alignment); |
| | 1585 DCHECK(!allocation.IsRetry()); |
| | 1586 } |
| | 1587 Object* target = allocation.ToObjectChecked(); |
| | 1588 |
| | 1589 heap_->mark_compact_collector()->MigrateObject( |
| | 1590 HeapObject::cast(target), object, size, NEW_SPACE, nullptr); |
| | 1591 if (V8_UNLIKELY(target->IsJSArrayBuffer())) { |
| | 1592 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target)); |
| | 1593 } |
| | 1594 heap_->IncrementSemiSpaceCopiedObjectSize(size); |
| | 1595 return true; |
| | 1596 } |
| | 1597 |
| | 1598 private: |
| | 1599 Heap* heap_; |
| | 1600 }; |
| | 1601 |
| | 1602 |
| | 1603 class MarkCompactCollector::EvacuateOldSpaceVisitor |
| | 1604 : public MarkCompactCollector::HeapObjectVisitor { |
| | 1605 public: |
| | 1606 EvacuateOldSpaceVisitor(Heap* heap, |
| | 1607 CompactionSpaceCollection* compaction_spaces, |
| | 1608 SlotsBuffer** evacuation_slots_buffer) |
| | 1609 : heap_(heap), |
| | 1610 compaction_spaces_(compaction_spaces), |
| | 1611 evacuation_slots_buffer_(evacuation_slots_buffer) {} |
| | 1612 |
| | 1613 virtual bool Visit(HeapObject* object) { |
| | 1614 int size = object->Size(); |
| | 1615 AllocationAlignment alignment = object->RequiredAlignment(); |
| | 1616 HeapObject* target_object = nullptr; |
| | 1617 AllocationSpace id = |
| | 1618 Page::FromAddress(object->address())->owner()->identity(); |
| | 1619 AllocationResult allocation = |
| | 1620 compaction_spaces_->Get(id)->AllocateRaw(size, alignment); |
| | 1621 if (!allocation.To(&target_object)) { |
| | 1622 return false; |
| | 1623 } |
| | 1624 heap_->mark_compact_collector()->MigrateObject( |
| | 1625 target_object, object, size, id, evacuation_slots_buffer_); |
| | 1626 DCHECK(object->map_word().IsForwardingAddress()); |
| | 1627 return true; |
| | 1628 } |
| | 1629 |
| | 1630 private: |
| | 1631 Heap* heap_; |
| | 1632 CompactionSpaceCollection* compaction_spaces_; |
| | 1633 SlotsBuffer** evacuation_slots_buffer_; |
| | 1634 }; |
| | 1635 |
| | 1636 |
| | 1637 bool MarkCompactCollector::IterateLiveObjectsOnPage(MemoryChunk* page, |
| | 1638 HeapObjectVisitor* visitor, |
| | 1639 IterationMode mode) { |
| | 1640 DCHECK(!page->IsFlagSet(MemoryChunk::WAS_SWEPT)); |
| | 1641 int offsets[16]; |
| | 1642 for (MarkBitCellIterator it(page); !it.Done(); it.Advance()) { |
| 1558 Address cell_base = it.CurrentCellBase(); | 1643 Address cell_base = it.CurrentCellBase(); |
| 1559 MarkBit::CellType* cell = it.CurrentCell(); | 1644 MarkBit::CellType* cell = it.CurrentCell(); |
| 1560 | 1645 |
| 1561 MarkBit::CellType current_cell = *cell; | 1646 if (*cell == 0) continue; |
| 1562 if (current_cell == 0) continue; | |
| 1563 | 1647 |
| 1564 int offset = 0; | 1648 int live_objects = MarkWordToObjectStarts(*cell, offsets); |
| 1565 while (current_cell != 0) { | 1649 for (int i = 0; i < live_objects; i++) { |
| 1566 int trailing_zeros = base::bits::CountTrailingZeros32(current_cell); | 1650 Address object_addr = cell_base + offsets[i] * kPointerSize; |
| 1567 current_cell >>= trailing_zeros; | 1651 HeapObject* object = HeapObject::FromAddress(object_addr); |
| 1568 offset += trailing_zeros; | |
| 1569 Address address = cell_base + offset * kPointerSize; | |
| 1570 HeapObject* object = HeapObject::FromAddress(address); | |
| 1571 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); | 1652 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); |
| 1572 | 1653 if (!visitor->Visit(object)) { |
| 1573 int size = object->Size(); | 1654 if ((mode == kClearMarkbits) && (i > 0)) { |
| 1574 survivors_size += size; | 1655 page->markbits()->ClearRange( |
| 1575 | 1656 page->AddressToMarkbitIndex(page->area_start()), |
| 1576 Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT); | 1657 page->AddressToMarkbitIndex(object_addr)); |
| 1577 | 1658 } |
| 1578 offset += 2; | 1659 return false; |
| 1579 current_cell >>= 2; | |
| 1580 | |
| 1581 // TODO(hpayer): Refactor EvacuateObject and call this function instead. | |
| 1582 if (heap()->ShouldBePromoted(object->address(), size) && | |
| 1583 TryPromoteObject(object, size)) { | |
| 1584 continue; | |
| 1585 } | 1660 } |
| 1586 | |
| 1587 AllocationAlignment alignment = object->RequiredAlignment(); | |
| 1588 AllocationResult allocation = new_space->AllocateRaw(size, alignment); | |
| 1589 if (allocation.IsRetry()) { | |
| 1590 if (!new_space->AddFreshPage()) { | |
| 1591 // Shouldn't happen. We are sweeping linearly, and to-space | |
| 1592 // has the same number of pages as from-space, so there is | |
| 1593 // always room unless we are in an OOM situation. | |
| 1594 FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n"); | |
| 1595 } | |
| 1596 allocation = new_space->AllocateRaw(size, alignment); | |
| 1597 DCHECK(!allocation.IsRetry()); | |
| 1598 } | |
| 1599 Object* target = allocation.ToObjectChecked(); | |
| 1600 | |
| 1601 MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE, nullptr); | |
| 1602 if (V8_UNLIKELY(target->IsJSArrayBuffer())) { | |
| 1603 heap()->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target)); | |
| 1604 } | |
| 1605 heap()->IncrementSemiSpaceCopiedObjectSize(size); | |
| 1606 } | 1661 } |
| 1607 *cells = 0; | 1662 if (mode == kClearMarkbits) { |
| | 1663 *cell = 0; |
| | 1664 } |
| 1608 } | 1665 } |
| 1609 return survivors_size; | 1666 return true; |
| 1610 } | 1667 } |
| 1611 | 1668 |
| 1612 | 1669 |
| 1613 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) { | 1670 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) { |
| 1614 PageIterator it(space); | 1671 PageIterator it(space); |
| 1615 while (it.has_next()) { | 1672 while (it.has_next()) { |
| 1616 Page* p = it.next(); | 1673 Page* p = it.next(); |
| 1617 DiscoverGreyObjectsOnPage(p); | 1674 DiscoverGreyObjectsOnPage(p); |
| 1618 if (marking_deque()->IsFull()) return; | 1675 if (marking_deque()->IsFull()) return; |
| 1619 } | 1676 } |
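
The structural change in this chunk: the hand-rolled evacuation loop of DiscoverAndEvacuateBlackObjectsOnPage (left column) and the near-identical one in EvacuateLiveObjectsFromPage (removed further down) are folded into a single primitive, IterateLiveObjectsOnPage, driven by HeapObjectVisitor implementations. A reduced model of that seam, with simplified stand-in types rather than the real V8 ones:

```cpp
// Reduced model of the visitor seam introduced in this CL; HeapObject and
// Page are stand-ins, only the control flow mirrors the real code.
#include <vector>

struct HeapObject { /* map word, size, ... */ };
using Page = std::vector<HeapObject*>;  // stand-in: a page's live objects

class HeapObjectVisitor {
 public:
  virtual ~HeapObjectVisitor() {}
  // Returning false aborts the iteration, e.g. when an evacuation
  // candidate's target space cannot allocate the copy.
  virtual bool Visit(HeapObject* object) = 0;
};

bool IterateLiveObjects(Page& page, HeapObjectVisitor* visitor) {
  for (HeapObject* object : page) {
    if (!visitor->Visit(object)) return false;
  }
  return true;
}
```

The asymmetry between the two concrete visitors matters for the reviewer exchange further down: EvacuateNewSpaceVisitor::Visit() can always fall back to promotion or a fresh to-space page and therefore always returns true, while EvacuateOldSpaceVisitor::Visit() returns false as soon as AllocateRaw() fails, aborting compaction for that page.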
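
The rewritten loop also replaces the inline CountTrailingZeros32 scan (left column) with MarkWordToObjectStarts, the helper the removed EvacuateLiveObjectsFromPage already used. Its body is not part of this diff; the sketch below reconstructs its semantics from the old inline scan and is only a model:

```cpp
// Model of what MarkWordToObjectStarts(*cell, offsets) computes. A 32-bit
// mark cell covers 32 words of the page and each live object uses two mark
// bits, so one cell yields at most 16 starts -- hence int offsets[16].
#include <cstdint>

int MarkWordToObjectStartsModel(uint32_t mark_word, int* offsets) {
  int count = 0;
  int offset = 0;
  while (mark_word != 0) {
    int trailing_zeros = __builtin_ctz(mark_word);  // GCC/Clang intrinsic
    mark_word >>= trailing_zeros;
    offset += trailing_zeros;
    offsets[count++] = offset;  // word offset of the next black object
    // Skip this object's two mark bits before scanning again; safe since
    // the minimum object size keeps starts at least two words apart.
    offset += 2;
    mark_word >>= 2;
  }
  return count;
}
```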

(...skipping 1460 matching lines...)

| OLD | NEW |
|---|---|
| 3080 new_space->Flip(); | 3137 new_space->Flip(); |
| 3081 new_space->ResetAllocationInfo(); | 3138 new_space->ResetAllocationInfo(); |
| 3082 | 3139 |
| 3083 int survivors_size = 0; | 3140 int survivors_size = 0; |
| 3084 | 3141 |
| 3085 // First pass: traverse all objects in inactive semispace, remove marks, | 3142 // First pass: traverse all objects in inactive semispace, remove marks, |
| 3086 // migrate live objects and write forwarding addresses. This stage puts | 3143 // migrate live objects and write forwarding addresses. This stage puts |
| 3087 // new entries in the store buffer and may cause some pages to be marked | 3144 // new entries in the store buffer and may cause some pages to be marked |
| 3088 // scan-on-scavenge. | 3145 // scan-on-scavenge. |
| 3089 NewSpacePageIterator it(from_bottom, from_top); | 3146 NewSpacePageIterator it(from_bottom, from_top); |
| | 3147 EvacuateNewSpaceVisitor new_space_visitor(heap()); |
| 3090 while (it.has_next()) { | 3148 while (it.has_next()) { |
| 3091 NewSpacePage* p = it.next(); | 3149 NewSpacePage* p = it.next(); |
| 3092 survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p); | 3150 survivors_size += p->LiveBytes(); |
| | 3151 IterateLiveObjectsOnPage(p, &new_space_visitor, kClearMarkbits); |

> Hannes Payer (out of office), 2015/11/26 09:57:32:
> This one should always return true. Can we DCHECK
>
> Michael Lippautz, 2015/11/26 14:56:28:
> Done.

| OLD | NEW |
|---|---|
| 3093 } | 3152 } |
| 3094 | 3153 |
| 3095 heap_->IncrementYoungSurvivorsCounter(survivors_size); | 3154 heap_->IncrementYoungSurvivorsCounter(survivors_size); |
| 3096 new_space->set_age_mark(new_space->top()); | 3155 new_space->set_age_mark(new_space->top()); |
| 3097 } | 3156 } |
| 3098 | 3157 |
| 3099 | 3158 |
| 3100 void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized( | 3159 void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized( |
| 3101 SlotsBuffer* evacuation_slots_buffer) { | 3160 SlotsBuffer* evacuation_slots_buffer) { |
| 3102 base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_); | 3161 base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_); |
| 3103 evacuation_slots_buffers_.Add(evacuation_slots_buffer); | 3162 evacuation_slots_buffers_.Add(evacuation_slots_buffer); |
| 3104 } | 3163 } |
| 3105 | 3164 |
| 3106 | 3165 |
| 3107 bool MarkCompactCollector::EvacuateLiveObjectsFromPage( | |
| 3108 Page* p, PagedSpace* target_space, SlotsBuffer** evacuation_slots_buffer) { | |
| 3109 AlwaysAllocateScope always_allocate(isolate()); | |
| 3110 DCHECK(p->IsEvacuationCandidate() && !p->WasSwept()); | |
| 3111 | |
| 3112 int offsets[16]; | |
| 3113 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { | |
| 3114 Address cell_base = it.CurrentCellBase(); | |
| 3115 MarkBit::CellType* cell = it.CurrentCell(); | |
| 3116 | |
| 3117 if (*cell == 0) continue; | |
| 3118 | |
| 3119 int live_objects = MarkWordToObjectStarts(*cell, offsets); | |
| 3120 for (int i = 0; i < live_objects; i++) { | |
| 3121 Address object_addr = cell_base + offsets[i] * kPointerSize; | |
| 3122 HeapObject* object = HeapObject::FromAddress(object_addr); | |
| 3123 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); | |
| 3124 | |
| 3125 int size = object->Size(); | |
| 3126 AllocationAlignment alignment = object->RequiredAlignment(); | |
| 3127 HeapObject* target_object = nullptr; | |
| 3128 AllocationResult allocation = target_space->AllocateRaw(size, alignment); | |
| 3129 if (!allocation.To(&target_object)) { | |
| 3130 // We need to abort compaction for this page. Make sure that we reset | |
| 3131 // the mark bits for objects that have already been migrated. | |
| 3132 if (i > 0) { | |
| 3133 p->markbits()->ClearRange(p->AddressToMarkbitIndex(p->area_start()), | |
| 3134 p->AddressToMarkbitIndex(object_addr)); | |
| 3135 } | |
| 3136 return false; | |
| 3137 } | |
| 3138 | |
| 3139 MigrateObject(target_object, object, size, target_space->identity(), | |
| 3140 evacuation_slots_buffer); | |
| 3141 DCHECK(object->map_word().IsForwardingAddress()); | |
| 3142 } | |
| 3143 | |
| 3144 // Clear marking bits for current cell. | |
| 3145 *cell = 0; | |
| 3146 } | |
| 3147 p->ResetLiveBytes(); | |
| 3148 return true; | |
| 3149 } | |
| 3150 | |
| 3151 | |
| 3152 int MarkCompactCollector::NumberOfParallelCompactionTasks() { | 3166 int MarkCompactCollector::NumberOfParallelCompactionTasks() { |
| 3153 if (!FLAG_parallel_compaction) return 1; | 3167 if (!FLAG_parallel_compaction) return 1; |
| 3154 // Compute the number of needed tasks based on a target compaction time, the | 3168 // Compute the number of needed tasks based on a target compaction time, the |
| 3155 // profiled compaction speed and marked live memory. | 3169 // profiled compaction speed and marked live memory. |
| 3156 // | 3170 // |
| 3157 // The number of parallel compaction tasks is limited by: | 3171 // The number of parallel compaction tasks is limited by: |
| 3158 // - #evacuation pages | 3172 // - #evacuation pages |
| 3159 // - (#cores - 1) | 3173 // - (#cores - 1) |
| 3160 // - a hard limit | 3174 // - a hard limit |
| 3161 const double kTargetCompactionTimeInMs = 1; | 3175 const double kTargetCompactionTimeInMs = 1; |
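
The body of NumberOfParallelCompactionTasks() is elided in this diff, so the following is a hedged reconstruction of the heuristic from the comment above; every name except kTargetCompactionTimeInMs is an assumption:

```cpp
// Hedged reconstruction of the task-count heuristic; only
// kTargetCompactionTimeInMs appears in the diff, the rest (names, hard
// limit value, rounding) are assumptions for illustration.
#include <algorithm>
#include <cstdint>

int NumberOfParallelCompactionTasksModel(
    int evacuation_pages,
    int64_t live_bytes,        // marked live memory on candidate pages
    int64_t compaction_speed,  // profiled, in bytes/ms
    int cores) {
  const double kTargetCompactionTimeInMs = 1;
  const int kMaxCompactionTasks = 8;  // assumed hard limit
  // Enough tasks that live_bytes / (tasks * speed) stays near the target.
  int tasks = evacuation_pages;
  if (compaction_speed > 0) {
    tasks = 1 + static_cast<int>(live_bytes / (kTargetCompactionTimeInMs *
                                               compaction_speed));
  }
  // Cap by #evacuation pages (a page is the unit of work), by cores - 1,
  // and by the hard limit, as the comment above spells out.
  tasks = std::min({tasks, evacuation_pages, cores - 1, kMaxCompactionTasks});
  return std::max(1, tasks);
}
```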

(...skipping 144 matching lines...)

| OLD | NEW |
|---|---|
| 3306 pending_compaction_tasks_semaphore_.Wait(); | 3320 pending_compaction_tasks_semaphore_.Wait(); |
| 3307 } | 3321 } |
| 3308 } | 3322 } |
| 3309 compaction_in_progress_ = false; | 3323 compaction_in_progress_ = false; |
| 3310 } | 3324 } |
| 3311 | 3325 |
| 3312 | 3326 |
| 3313 void MarkCompactCollector::EvacuatePages( | 3327 void MarkCompactCollector::EvacuatePages( |
| 3314 CompactionSpaceCollection* compaction_spaces, | 3328 CompactionSpaceCollection* compaction_spaces, |
| 3315 SlotsBuffer** evacuation_slots_buffer) { | 3329 SlotsBuffer** evacuation_slots_buffer) { |
| | 3330 EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces, |
| | 3331 evacuation_slots_buffer); |
| 3316 for (int i = 0; i < evacuation_candidates_.length(); i++) { | 3332 for (int i = 0; i < evacuation_candidates_.length(); i++) { |
| 3317 Page* p = evacuation_candidates_[i]; | 3333 Page* p = evacuation_candidates_[i]; |
| 3318 DCHECK(p->IsEvacuationCandidate() || | 3334 DCHECK(p->IsEvacuationCandidate() || |
| 3319 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 3335 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
| 3320 DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) == | 3336 DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) == |
| 3321 MemoryChunk::kSweepingDone); | 3337 MemoryChunk::kSweepingDone); |
| 3322 if (p->parallel_compaction_state().TrySetValue( | 3338 if (p->parallel_compaction_state().TrySetValue( |
| 3323 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) { | 3339 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) { |
| 3324 if (p->IsEvacuationCandidate()) { | 3340 if (p->IsEvacuationCandidate()) { |
| 3325 DCHECK_EQ(p->parallel_compaction_state().Value(), | 3341 DCHECK_EQ(p->parallel_compaction_state().Value(), |
| 3326 MemoryChunk::kCompactingInProgress); | 3342 MemoryChunk::kCompactingInProgress); |
| 3327 double start = heap()->MonotonicallyIncreasingTimeInMs(); | 3343 double start = heap()->MonotonicallyIncreasingTimeInMs(); |
| 3328 intptr_t live_bytes = p->LiveBytes(); | 3344 intptr_t live_bytes = p->LiveBytes(); |
| 3329 if (EvacuateLiveObjectsFromPage( | 3345 if (IterateLiveObjectsOnPage(p, &visitor, kClearMarkbits)) { |
| 3330 p, compaction_spaces->Get(p->owner()->identity()), | 3346 p->ResetLiveBytes(); |
| 3331 evacuation_slots_buffer)) { | |
| 3332 p->parallel_compaction_state().SetValue( | 3347 p->parallel_compaction_state().SetValue( |
| 3333 MemoryChunk::kCompactingFinalize); | 3348 MemoryChunk::kCompactingFinalize); |
| 3334 compaction_spaces->ReportCompactionProgress( | 3349 compaction_spaces->ReportCompactionProgress( |
| 3335 heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes); | 3350 heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes); |
| 3336 } else { | 3351 } else { |
| 3337 p->parallel_compaction_state().SetValue( | 3352 p->parallel_compaction_state().SetValue( |
| 3338 MemoryChunk::kCompactingAborted); | 3353 MemoryChunk::kCompactingAborted); |
| 3339 } | 3354 } |
| 3340 } else { | 3355 } else { |
| 3341 // There could be popular pages in the list of evacuation candidates | 3356 // There could be popular pages in the list of evacuation candidates |
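
EvacuatePages() is written so that several compaction tasks can race over the shared candidate list: a page is claimed via an atomic compare-and-swap on its parallel_compaction_state before anyone evacuates it. A simplified model of that claiming protocol, with std::atomic standing in for MemoryChunk's state machinery:

```cpp
// Simplified model of the page-claiming protocol above; the evacuation
// itself is stubbed out and std::atomic replaces MemoryChunk's AtomicValue.
#include <atomic>
#include <vector>

enum CompactingState {
  kCompactingDone,
  kCompactingInProgress,
  kCompactingFinalize,
  kCompactingAborted,
};

struct Page {
  std::atomic<CompactingState> compaction_state{kCompactingDone};
  bool Evacuate() { return true; }  // stub for the visitor-driven iteration
};

// Run concurrently by each compaction task over the same candidate list.
void EvacuatePagesModel(std::vector<Page*>& candidates) {
  for (Page* p : candidates) {
    CompactingState expected = kCompactingDone;
    // Mirrors TrySetValue(kCompactingDone, kCompactingInProgress): exactly
    // one task wins the swap and owns the page.
    if (p->compaction_state.compare_exchange_strong(expected,
                                                    kCompactingInProgress)) {
      // Success hands the page to finalization; an allocation failure in
      // the compaction space marks it aborted for rescanning instead.
      p->compaction_state.store(p->Evacuate() ? kCompactingFinalize
                                              : kCompactingAborted);
    }
    // Losing the race just means another task already claimed the page.
  }
}
```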

(...skipping 1145 matching lines...)

| OLD | NEW |
|---|---|
| 4487 MarkBit mark_bit = Marking::MarkBitFrom(host); | 4502 MarkBit mark_bit = Marking::MarkBitFrom(host); |
| 4488 if (Marking::IsBlack(mark_bit)) { | 4503 if (Marking::IsBlack(mark_bit)) { |
| 4489 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); | 4504 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); |
| 4490 RecordRelocSlot(&rinfo, target); | 4505 RecordRelocSlot(&rinfo, target); |
| 4491 } | 4506 } |
| 4492 } | 4507 } |
| 4493 } | 4508 } |
| 4494 | 4509 |
| 4495 } // namespace internal | 4510 } // namespace internal |
| 4496 } // namespace v8 | 4511 } // namespace v8 |