| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
| 6 | 6 |
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
| 9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
| 10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
| (...skipping 218 matching lines...) |
| 229 VerifyEvacuation(heap, heap->map_space()); | 229 VerifyEvacuation(heap, heap->map_space()); |
| 230 VerifyEvacuation(heap->new_space()); | 230 VerifyEvacuation(heap->new_space()); |
| 231 | 231 |
| 232 VerifyEvacuationVisitor visitor; | 232 VerifyEvacuationVisitor visitor; |
| 233 heap->IterateStrongRoots(&visitor, VISIT_ALL); | 233 heap->IterateStrongRoots(&visitor, VISIT_ALL); |
| 234 } | 234 } |
| 235 #endif // VERIFY_HEAP | 235 #endif // VERIFY_HEAP |
| 236 | 236 |
| 237 | 237 |
| 238 void MarkCompactCollector::SetUp() { | 238 void MarkCompactCollector::SetUp() { |
| 239 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); | |
| 240 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); | |
| 241 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); | |
| 242 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); | |
| 243 | |
| 244 free_list_old_space_.Reset(new FreeList(heap_->old_space())); | 239 free_list_old_space_.Reset(new FreeList(heap_->old_space())); |
| 245 free_list_code_space_.Reset(new FreeList(heap_->code_space())); | 240 free_list_code_space_.Reset(new FreeList(heap_->code_space())); |
| 246 free_list_map_space_.Reset(new FreeList(heap_->map_space())); | 241 free_list_map_space_.Reset(new FreeList(heap_->map_space())); |
| 247 EnsureMarkingDequeIsReserved(); | 242 EnsureMarkingDequeIsReserved(); |
| 248 EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize); | 243 EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize); |
| 249 slots_buffer_allocator_ = new SlotsBufferAllocator(); | 244 slots_buffer_allocator_ = new SlotsBufferAllocator(); |
| 250 } | 245 } |
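Note: the four Marking bit-pattern DCHECKs removed from SetUp() above reappear in DiscoverAndEvacuateBlackObjectsOnPage() below, next to the loop that depends on them. A minimal sketch of the two-bit colour encoding they assert, using simplified stand-in types rather than V8's Marking/MarkBit classes:

    #include <cstdint>

    // Each object's colour lives in two consecutive mark bits (first, second):
    // white = 00, black = 10, grey = 11; 01 must never occur.
    enum class Color { kWhite, kBlack, kGrey, kImpossible };

    Color ColorAt(uint32_t cell, int bit) {  // 'bit' is the object's first mark bit
      bool first = (cell >> bit) & 1u;
      bool second = (cell >> (bit + 1)) & 1u;
      if (first) return second ? Color::kGrey : Color::kBlack;
      return second ? Color::kImpossible : Color::kWhite;
    }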
| 251 | 246 |
| 252 | 247 |
| 253 void MarkCompactCollector::TearDown() { | 248 void MarkCompactCollector::TearDown() { |
| (...skipping 1243 matching lines...) |
| 1497 if (marking_deque()->IsFull()) return; | 1492 if (marking_deque()->IsFull()) return; |
| 1498 offset += 2; | 1493 offset += 2; |
| 1499 grey_objects >>= 2; | 1494 grey_objects >>= 2; |
| 1500 } | 1495 } |
| 1501 | 1496 |
| 1502 grey_objects >>= (Bitmap::kBitsPerCell - 1); | 1497 grey_objects >>= (Bitmap::kBitsPerCell - 1); |
| 1503 } | 1498 } |
| 1504 } | 1499 } |
| 1505 | 1500 |
| 1506 | 1501 |
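The tail of DiscoverGreyObjectsOnPage() above advances two bits per object (`offset += 2; grey_objects >>= 2;`) because each colour occupies a bit pair. A self-contained sketch of that scan over a single 32-bit cell, assuming the usual `cell & (cell >> 1)` construction of the grey mask and ignoring the pair that straddles a cell boundary (which the real code folds in from the next cell):

    #include <cstdint>

    void ForEachGreyOffset(uint32_t cell, void (*visit)(int offset)) {
      uint32_t grey = cell & (cell >> 1);  // bit i set iff bits i and i+1 are set
      int offset = 0;
      while (grey != 0) {
        int skip = __builtin_ctz(grey);    // stand-in for base::bits::CountTrailingZeros32
        grey >>= skip;
        offset += skip;
        visit(offset);                     // grey object starts 'offset' mark bits into the cell
        offset += 2;                       // consume the object's bit pair
        grey >>= 2;
      }
    }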
| 1507 class MarkCompactCollector::HeapObjectVisitor { | 1502 int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage( |
| 1508 public: | 1503 NewSpace* new_space, NewSpacePage* p) { |
| 1509 virtual ~HeapObjectVisitor() {} | 1504 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); |
| 1510 virtual bool Visit(HeapObject* object) = 0; | 1505 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); |
| 1511 }; | 1506 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); |
| | 1507 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); |
| 1512 | 1508 |
| | 1509 MarkBit::CellType* cells = p->markbits()->cells(); |
| | 1510 int survivors_size = 0; |
| 1513 | 1511 |
| 1514 class MarkCompactCollector::EvacuateNewSpaceVisitor | 1512 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { |
| 1515 : public MarkCompactCollector::HeapObjectVisitor { | |
| 1516 public: | |
| 1517 explicit EvacuateNewSpaceVisitor(Heap* heap) : heap_(heap) {} | |
| 1518 | |
| 1519 virtual bool Visit(HeapObject* object) { | |
| 1520 Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT); | |
| 1521 int size = object->Size(); | |
| 1522 | |
| 1523 // TODO(hpayer): Refactor EvacuateObject and call this function instead. | |
| 1524 if (heap_->ShouldBePromoted(object->address(), size) && | |
| 1525 heap_->mark_compact_collector()->TryPromoteObject(object, size)) { | |
| 1526 return true; | |
| 1527 } | |
| 1528 | |
| 1529 AllocationAlignment alignment = object->RequiredAlignment(); | |
| 1530 AllocationResult allocation = | |
| 1531 heap_->new_space()->AllocateRaw(size, alignment); | |
| 1532 if (allocation.IsRetry()) { | |
| 1533 if (!heap_->new_space()->AddFreshPage()) { | |
| 1534 // Shouldn't happen. We are sweeping linearly, and to-space | |
| 1535 // has the same number of pages as from-space, so there is | |
| 1536 // always room unless we are in an OOM situation. | |
| 1537 FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n"); | |
| 1538 } | |
| 1539 allocation = heap_->new_space()->AllocateRaw(size, alignment); | |
| 1540 DCHECK(!allocation.IsRetry()); | |
| 1541 } | |
| 1542 Object* target = allocation.ToObjectChecked(); | |
| 1543 | |
| 1544 heap_->mark_compact_collector()->MigrateObject( | |
| 1545 HeapObject::cast(target), object, size, NEW_SPACE, nullptr); | |
| 1546 if (V8_UNLIKELY(target->IsJSArrayBuffer())) { | |
| 1547 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target)); | |
| 1548 } | |
| 1549 heap_->IncrementSemiSpaceCopiedObjectSize(size); | |
| 1550 return true; | |
| 1551 } | |
| 1552 | |
| 1553 private: | |
| 1554 Heap* heap_; | |
| 1555 }; | |
| 1556 | |
| 1557 | |
| 1558 class MarkCompactCollector::EvacuateOldSpaceVisitor | |
| 1559 : public MarkCompactCollector::HeapObjectVisitor { | |
| 1560 public: | |
| 1561 EvacuateOldSpaceVisitor(Heap* heap, | |
| 1562 CompactionSpaceCollection* compaction_spaces, | |
| 1563 SlotsBuffer** evacuation_slots_buffer) | |
| 1564 : heap_(heap), | |
| 1565 compaction_spaces_(compaction_spaces), | |
| 1566 evacuation_slots_buffer_(evacuation_slots_buffer) {} | |
| 1567 | |
| 1568 virtual bool Visit(HeapObject* object) { | |
| 1569 int size = object->Size(); | |
| 1570 AllocationAlignment alignment = object->RequiredAlignment(); | |
| 1571 HeapObject* target_object = nullptr; | |
| 1572 AllocationSpace id = | |
| 1573 Page::FromAddress(object->address())->owner()->identity(); | |
| 1574 AllocationResult allocation = | |
| 1575 compaction_spaces_->Get(id)->AllocateRaw(size, alignment); | |
| 1576 if (!allocation.To(&target_object)) { | |
| 1577 return false; | |
| 1578 } | |
| 1579 heap_->mark_compact_collector()->MigrateObject( | |
| 1580 target_object, object, size, id, evacuation_slots_buffer_); | |
| 1581 DCHECK(object->map_word().IsForwardingAddress()); | |
| 1582 return true; | |
| 1583 } | |
| 1584 | |
| 1585 private: | |
| 1586 Heap* heap_; | |
| 1587 CompactionSpaceCollection* compaction_spaces_; | |
| 1588 SlotsBuffer** evacuation_slots_buffer_; | |
| 1589 }; | |
| 1590 | |
| 1591 | |
| 1592 bool MarkCompactCollector::IterateLiveObjectsOnPage(MemoryChunk* page, | |
| 1593 HeapObjectVisitor* visitor, | |
| 1594 IterationMode mode) { | |
| 1595 Address offsets[16]; | |
| 1596 for (MarkBitCellIterator it(page); !it.Done(); it.Advance()) { | |
| 1597 Address cell_base = it.CurrentCellBase(); | 1513 Address cell_base = it.CurrentCellBase(); |
| 1598 MarkBit::CellType* cell = it.CurrentCell(); | 1514 MarkBit::CellType* cell = it.CurrentCell(); |
| 1599 | 1515 |
| 1600 if (*cell == 0) continue; | 1516 MarkBit::CellType current_cell = *cell; |
| | 1517 if (current_cell == 0) continue; |
| 1601 | 1518 |
| 1602 int live_objects = MarkWordToObjectStarts(*cell, cell_base, offsets); | 1519 int offset = 0; |
| 1603 for (int i = 0; i < live_objects; i++) { | 1520 while (current_cell != 0) { |
| 1604 HeapObject* object = HeapObject::FromAddress(offsets[i]); | 1521 int trailing_zeros = base::bits::CountTrailingZeros32(current_cell); |
| | 1522 current_cell >>= trailing_zeros; |
| | 1523 offset += trailing_zeros; |
| | 1524 Address address = cell_base + offset * kPointerSize; |
| | 1525 HeapObject* object = HeapObject::FromAddress(address); |
| 1605 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); | 1526 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); |
| 1606 if (!visitor->Visit(object)) { | 1527 |
| 1607 if ((mode == kClearMarkbits) && (i > 0)) { | 1528 int size = object->Size(); |
| 1608 page->markbits()->ClearRange( | 1529 survivors_size += size; |
| 1609 page->AddressToMarkbitIndex(page->area_start()), | 1530 |
| 1610 page->AddressToMarkbitIndex(offsets[i])); | 1531 Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT); |
| | 1532 |
| | 1533 offset += 2; |
| | 1534 current_cell >>= 2; |
| | 1535 |
| | 1536 // TODO(hpayer): Refactor EvacuateObject and call this function instead. |
| | 1537 if (heap()->ShouldBePromoted(object->address(), size) && |
| | 1538 TryPromoteObject(object, size)) { |
| | 1539 continue; |
| | 1540 } |
| | 1541 |
| | 1542 AllocationAlignment alignment = object->RequiredAlignment(); |
| | 1543 AllocationResult allocation = new_space->AllocateRaw(size, alignment); |
| | 1544 if (allocation.IsRetry()) { |
| | 1545 if (!new_space->AddFreshPage()) { |
| | 1546 // Shouldn't happen. We are sweeping linearly, and to-space |
| | 1547 // has the same number of pages as from-space, so there is |
| | 1548 // always room unless we are in an OOM situation. |
| | 1549 FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n"); |
| 1611 } | 1550 } |
| 1612 return false; | 1551 allocation = new_space->AllocateRaw(size, alignment); |
| | 1552 DCHECK(!allocation.IsRetry()); |
| 1613 } | 1553 } |
| | 1554 Object* target = allocation.ToObjectChecked(); |
| | 1555 |
| | 1556 MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE, nullptr); |
| | 1557 if (V8_UNLIKELY(target->IsJSArrayBuffer())) { |
| | 1558 heap()->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target)); |
| | 1559 } |
| | 1560 heap()->IncrementSemiSpaceCopiedObjectSize(size); |
| 1614 } | 1561 } |
| 1615 if (mode == kClearMarkbits) { | 1562 *cells = 0; |
| 1616 *cell = 0; | |
| 1617 } | |
| 1618 } | 1563 } |
| 1619 return true; | 1564 return survivors_size; |
| 1620 } | 1565 } |
| 1621 | 1566 |
| 1622 | 1567 |
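The restored DiscoverAndEvacuateBlackObjectsOnPage() walks black objects with CountTrailingZeros32 instead of the MarkWordToObjectStarts() table used by the deleted visitor path. A sketch of that walk over one bitmap cell; kPointerSizeLog2 and the callback are simplified assumptions, not V8's declarations:

    #include <cstdint>

    constexpr int kPointerSizeLog2 = 3;  // assumes 64-bit pointers

    template <typename Visit>
    void ForEachBlackStart(uint32_t cell, uintptr_t cell_base, Visit visit) {
      int offset = 0;
      while (cell != 0) {
        int skip = __builtin_ctz(cell);  // CountTrailingZeros32 equivalent
        cell >>= skip;
        offset += skip;
        // Object start = cell_base + offset * kPointerSize, as in the loop above.
        visit(cell_base + (static_cast<uintptr_t>(offset) << kPointerSizeLog2));
        offset += 2;                     // skip this object's two mark bits
        cell >>= 2;
      }
    }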
| 1623 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) { | 1568 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) { |
| 1624 PageIterator it(space); | 1569 PageIterator it(space); |
| 1625 while (it.has_next()) { | 1570 while (it.has_next()) { |
| 1626 Page* p = it.next(); | 1571 Page* p = it.next(); |
| 1627 DiscoverGreyObjectsOnPage(p); | 1572 DiscoverGreyObjectsOnPage(p); |
| 1628 if (marking_deque()->IsFull()) return; | 1573 if (marking_deque()->IsFull()) return; |
| 1629 } | 1574 } |
| (...skipping 1500 matching lines...) |
| 3130 new_space->Flip(); | 3075 new_space->Flip(); |
| 3131 new_space->ResetAllocationInfo(); | 3076 new_space->ResetAllocationInfo(); |
| 3132 | 3077 |
| 3133 int survivors_size = 0; | 3078 int survivors_size = 0; |
| 3134 | 3079 |
| 3135 // First pass: traverse all objects in inactive semispace, remove marks, | 3080 // First pass: traverse all objects in inactive semispace, remove marks, |
| 3136 // migrate live objects and write forwarding addresses. This stage puts | 3081 // migrate live objects and write forwarding addresses. This stage puts |
| 3137 // new entries in the store buffer and may cause some pages to be marked | 3082 // new entries in the store buffer and may cause some pages to be marked |
| 3138 // scan-on-scavenge. | 3083 // scan-on-scavenge. |
| 3139 NewSpacePageIterator it(from_bottom, from_top); | 3084 NewSpacePageIterator it(from_bottom, from_top); |
| 3140 EvacuateNewSpaceVisitor new_space_visitor(heap()); | |
| 3141 while (it.has_next()) { | 3085 while (it.has_next()) { |
| 3142 NewSpacePage* p = it.next(); | 3086 NewSpacePage* p = it.next(); |
| 3143 survivors_size += p->LiveBytes(); | 3087 survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p); |
| 3144 bool ok = IterateLiveObjectsOnPage(p, &new_space_visitor, kClearMarkbits); | |
| 3145 USE(ok); | |
| 3146 DCHECK(ok); | |
| 3147 } | 3088 } |
| 3148 | 3089 |
| 3149 heap_->IncrementYoungSurvivorsCounter(survivors_size); | 3090 heap_->IncrementYoungSurvivorsCounter(survivors_size); |
| 3150 new_space->set_age_mark(new_space->top()); | 3091 new_space->set_age_mark(new_space->top()); |
| 3151 } | 3092 } |
| 3152 | 3093 |
| 3153 | 3094 |
| 3154 void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized( | 3095 void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized( |
| 3155 SlotsBuffer* evacuation_slots_buffer) { | 3096 SlotsBuffer* evacuation_slots_buffer) { |
| 3156 base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_); | 3097 base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_); |
| 3157 evacuation_slots_buffers_.Add(evacuation_slots_buffer); | 3098 evacuation_slots_buffers_.Add(evacuation_slots_buffer); |
| 3158 } | 3099 } |
| 3159 | 3100 |
| 3160 | 3101 |
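AddEvacuationSlotsBufferSynchronized() above is the classic lock-guarded hand-off: each compaction task appends its local slots buffer to a shared list under a mutex. A sketch with standard-library stand-ins for base::Mutex/base::LockGuard and V8's list type:

    #include <mutex>
    #include <vector>

    struct SlotsBuffer;  // opaque here; holds recorded slots in V8

    class EvacuationSlotsBuffers {
     public:
      void Add(SlotsBuffer* buffer) {
        std::lock_guard<std::mutex> guard(mutex_);  // plays the role of base::LockGuard
        buffers_.push_back(buffer);
      }

     private:
      std::mutex mutex_;
      std::vector<SlotsBuffer*> buffers_;
    };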
| | 3102 bool MarkCompactCollector::EvacuateLiveObjectsFromPage( |
| | 3103 Page* p, PagedSpace* target_space, SlotsBuffer** evacuation_slots_buffer) { |
| | 3104 AlwaysAllocateScope always_allocate(isolate()); |
| | 3105 DCHECK(p->IsEvacuationCandidate() && !p->WasSwept()); |
| | 3106 |
| | 3107 Address starts[16]; |
| | 3108 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { |
| | 3109 Address cell_base = it.CurrentCellBase(); |
| | 3110 MarkBit::CellType* cell = it.CurrentCell(); |
| | 3111 |
| | 3112 if (*cell == 0) continue; |
| | 3113 |
| | 3114 int live_objects = MarkWordToObjectStarts(*cell, cell_base, starts); |
| | 3115 for (int i = 0; i < live_objects; i++) { |
| | 3116 HeapObject* object = HeapObject::FromAddress(starts[i]); |
| | 3117 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); |
| | 3118 |
| | 3119 int size = object->Size(); |
| | 3120 AllocationAlignment alignment = object->RequiredAlignment(); |
| | 3121 HeapObject* target_object = nullptr; |
| | 3122 AllocationResult allocation = target_space->AllocateRaw(size, alignment); |
| | 3123 if (!allocation.To(&target_object)) { |
| | 3124 // We need to abort compaction for this page. Make sure that we reset |
| | 3125 // the mark bits for objects that have already been migrated. |
| | 3126 if (i > 0) { |
| | 3127 p->markbits()->ClearRange(p->AddressToMarkbitIndex(p->area_start()), |
| | 3128 p->AddressToMarkbitIndex(starts[i])); |
| | 3129 } |
| | 3130 return false; |
| | 3131 } |
| | 3132 |
| | 3133 MigrateObject(target_object, object, size, target_space->identity(), |
| | 3134 evacuation_slots_buffer); |
| | 3135 DCHECK(object->map_word().IsForwardingAddress()); |
| | 3136 } |
| | 3137 |
| | 3138 // Clear marking bits for current cell. |
| | 3139 *cell = 0; |
| | 3140 } |
| | 3141 p->ResetLiveBytes(); |
| | 3142 return true; |
| | 3143 } |
| | 3144 |
| | 3145 |
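When EvacuateLiveObjectsFromPage() above aborts mid-page, it clears the mark bits of the objects migrated so far so the aborted page can be rescanned consistently. A sketch of that range clear over a flat bitmap; V8's Bitmap::ClearRange() operates on the page's markbits with the same half-open semantics assumed here:

    #include <cstdint>
    #include <vector>

    // Clear mark bits in the half-open range [start, end).
    void ClearMarkBitRange(std::vector<uint32_t>& bitmap, int start, int end) {
      for (int i = start; i < end; ++i) {
        bitmap[i / 32] &= ~(1u << (i % 32));  // clear bit i
      }
    }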
| 3161 int MarkCompactCollector::NumberOfParallelCompactionTasks() { | 3146 int MarkCompactCollector::NumberOfParallelCompactionTasks() { |
| 3162 if (!FLAG_parallel_compaction) return 1; | 3147 if (!FLAG_parallel_compaction) return 1; |
| 3163 // Compute the number of needed tasks based on a target compaction time, the | 3148 // Compute the number of needed tasks based on a target compaction time, the |
| 3164 // profiled compaction speed and marked live memory. | 3149 // profiled compaction speed and marked live memory. |
| 3165 // | 3150 // |
| 3166 // The number of parallel compaction tasks is limited by: | 3151 // The number of parallel compaction tasks is limited by: |
| 3167 // - #evacuation pages | 3152 // - #evacuation pages |
| 3168 // - (#cores - 1) | 3153 // - (#cores - 1) |
| 3169 // - a hard limit | 3154 // - a hard limit |
| 3170 const double kTargetCompactionTimeInMs = 1; | 3155 const double kTargetCompactionTimeInMs = 1; |
| (...skipping 144 matching lines...) |
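The visible part of NumberOfParallelCompactionTasks() states the three caps on the task count plus a time-based estimate from profiled compaction speed. A sketch of that heuristic; the hard limit and the estimate's rounding are illustrative assumptions, not the elided V8 constants:

    #include <algorithm>
    #include <cstdint>

    int NumberOfCompactionTasks(int evacuation_pages, int cores,
                                intptr_t live_bytes,
                                double speed_bytes_per_ms) {
      const double kTargetCompactionTimeInMs = 1;
      const int kMaxCompactionTasks = 8;  // assumed hard limit
      // Tasks needed to evacuate the marked live memory within the target time.
      int wanted = 1;
      if (speed_bytes_per_ms > 0) {
        wanted = static_cast<int>(
            live_bytes / (speed_bytes_per_ms * kTargetCompactionTimeInMs));
      }
      // Cap by #evacuation pages, #cores - 1, and the hard limit.
      return std::max(1, std::min({wanted, evacuation_pages, cores - 1,
                                   kMaxCompactionTasks}));
    }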
| 3315 pending_compaction_tasks_semaphore_.Wait(); | 3300 pending_compaction_tasks_semaphore_.Wait(); |
| 3316 } | 3301 } |
| 3317 } | 3302 } |
| 3318 compaction_in_progress_ = false; | 3303 compaction_in_progress_ = false; |
| 3319 } | 3304 } |
| 3320 | 3305 |
| 3321 | 3306 |
| 3322 void MarkCompactCollector::EvacuatePages( | 3307 void MarkCompactCollector::EvacuatePages( |
| 3323 CompactionSpaceCollection* compaction_spaces, | 3308 CompactionSpaceCollection* compaction_spaces, |
| 3324 SlotsBuffer** evacuation_slots_buffer) { | 3309 SlotsBuffer** evacuation_slots_buffer) { |
| 3325 EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces, | |
| 3326 evacuation_slots_buffer); | |
| 3327 for (int i = 0; i < evacuation_candidates_.length(); i++) { | 3310 for (int i = 0; i < evacuation_candidates_.length(); i++) { |
| 3328 Page* p = evacuation_candidates_[i]; | 3311 Page* p = evacuation_candidates_[i]; |
| 3329 DCHECK(p->IsEvacuationCandidate() || | 3312 DCHECK(p->IsEvacuationCandidate() || |
| 3330 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 3313 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
| 3331 DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) == | 3314 DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) == |
| 3332 MemoryChunk::kSweepingDone); | 3315 MemoryChunk::kSweepingDone); |
| 3333 if (p->parallel_compaction_state().TrySetValue( | 3316 if (p->parallel_compaction_state().TrySetValue( |
| 3334 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) { | 3317 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) { |
| 3335 if (p->IsEvacuationCandidate()) { | 3318 if (p->IsEvacuationCandidate()) { |
| 3336 DCHECK_EQ(p->parallel_compaction_state().Value(), | 3319 DCHECK_EQ(p->parallel_compaction_state().Value(), |
| 3337 MemoryChunk::kCompactingInProgress); | 3320 MemoryChunk::kCompactingInProgress); |
| 3338 double start = heap()->MonotonicallyIncreasingTimeInMs(); | 3321 double start = heap()->MonotonicallyIncreasingTimeInMs(); |
| 3339 intptr_t live_bytes = p->LiveBytes(); | 3322 intptr_t live_bytes = p->LiveBytes(); |
| 3340 if (IterateLiveObjectsOnPage(p, &visitor, kClearMarkbits)) { | 3323 if (EvacuateLiveObjectsFromPage( |
| 3341 p->ResetLiveBytes(); | 3324 p, compaction_spaces->Get(p->owner()->identity()), |
| | 3325 evacuation_slots_buffer)) { |
| 3342 p->parallel_compaction_state().SetValue( | 3326 p->parallel_compaction_state().SetValue( |
| 3343 MemoryChunk::kCompactingFinalize); | 3327 MemoryChunk::kCompactingFinalize); |
| 3344 compaction_spaces->ReportCompactionProgress( | 3328 compaction_spaces->ReportCompactionProgress( |
| 3345 heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes); | 3329 heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes); |
| 3346 } else { | 3330 } else { |
| 3347 p->parallel_compaction_state().SetValue( | 3331 p->parallel_compaction_state().SetValue( |
| 3348 MemoryChunk::kCompactingAborted); | 3332 MemoryChunk::kCompactingAborted); |
| 3349 } | 3333 } |
| 3350 } else { | 3334 } else { |
| 3351 // There could be popular pages in the list of evacuation candidates | 3335 // There could be popular pages in the list of evacuation candidates |
| (...skipping 770 matching lines...) |
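EvacuatePages() claims each candidate page with TrySetValue(), a compare-and-swap from kCompactingDone to kCompactingInProgress, so concurrent compaction tasks never evacuate the same page twice. A sketch of that idiom with std::atomic standing in for MemoryChunk's state field:

    #include <atomic>

    enum CompactionState { kCompactingDone, kCompactingInProgress,
                           kCompactingFinalize, kCompactingAborted };

    bool TryClaimPage(std::atomic<CompactionState>& state) {
      CompactionState expected = kCompactingDone;
      // Exactly one task wins the Done -> InProgress transition.
      return state.compare_exchange_strong(expected, kCompactingInProgress);
    }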
| 4122 MarkBit mark_bit = Marking::MarkBitFrom(host); | 4106 MarkBit mark_bit = Marking::MarkBitFrom(host); |
| 4123 if (Marking::IsBlack(mark_bit)) { | 4107 if (Marking::IsBlack(mark_bit)) { |
| 4124 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); | 4108 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); |
| 4125 RecordRelocSlot(&rinfo, target); | 4109 RecordRelocSlot(&rinfo, target); |
| 4126 } | 4110 } |
| 4127 } | 4111 } |
| 4128 } | 4112 } |
| 4129 | 4113 |
| 4130 } // namespace internal | 4114 } // namespace internal |
| 4131 } // namespace v8 | 4115 } // namespace v8 |