Chromium Code Reviews

Unified Diff: src/heap/mark-compact.cc

Issue 2846043003: [heap] Add concept of migration observers to evacuation (Closed)
Patch Set: Polish static version Created 3 years, 7 months ago
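
Note: This CL replaces EvacuateVisitorBase's hard-coded profiling_ flag with a general MigrationObserver mechanism. Visitors hold a list of observers plus a function pointer that starts at the kFast variant of RawMigrateObject and is switched to the kObserved variant only when an observer is registered, so evacuation without observers pays no per-object check. The profiling and logging notifications move into ProfilingMigrationObserver, and the task setup in EvacuatePagesInParallel is factored into the reusable helper CreateAndExecuteEvacuationTasks.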
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/heap/mark-compact.h"
 
 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/sys-info.h"
 #include "src/code-stubs.h"
(...skipping 1737 matching lines...)
         RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
       } else if (p->IsEvacuationCandidate()) {
         RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot);
       }
     }
   }
 
   MarkCompactCollector* collector_;
 };
 
+class MigrationObserver {
+ public:
+  explicit MigrationObserver(Heap* heap) : heap_(heap) {}
+
+  virtual ~MigrationObserver() {}
+  virtual void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
+                    int size) = 0;
+
+ protected:
+  Heap* heap_;
+};
+
+class ProfilingMigrationObserver final : public MigrationObserver {
+ public:
+  explicit ProfilingMigrationObserver(Heap* heap) : MigrationObserver(heap) {}
+
+  inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
+                   int size) final {
+    if (dest == CODE_SPACE || (dest == OLD_SPACE && dst->IsBytecodeArray())) {
+      PROFILE(heap_->isolate(),
+              CodeMoveEvent(AbstractCode::cast(src), dst->address()));
+    }
+    heap_->OnMoveEvent(dst, src, size);
+  }
+};
+
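
Note: MigrationObserver is the extension point this CL introduces; ProfilingMigrationObserver above is its only implementation so far. As a purely hypothetical illustration (not part of this CL), another observer could hook object moves the same way, for example to tally evacuated bytes per target space:

// Hypothetical sketch only -- assumes #include <map> and the
// MigrationObserver interface added above. An instance shared across
// parallel evacuators would additionally need synchronization.
class ByteCountingMigrationObserver final : public MigrationObserver {
 public:
  explicit ByteCountingMigrationObserver(Heap* heap)
      : MigrationObserver(heap) {}

  inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
                   int size) final {
    bytes_moved_[dest] += size;  // count bytes arriving in each space
  }

 private:
  std::map<AllocationSpace, size_t> bytes_moved_;
};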
 class HeapObjectVisitor {
  public:
   virtual ~HeapObjectVisitor() {}
   virtual bool Visit(HeapObject* object) = 0;
 };
 
 class EvacuateVisitorBase : public HeapObjectVisitor {
+ public:
+  void AddObserver(MigrationObserver* observer) {
+    migration_function_ = RawMigrateObject<MigrationMode::kObserved>;
+    observers_.push_back(observer);
+  }
+
  protected:
-  enum MigrationMode { kFast, kProfiled };
+  enum MigrationMode { kFast, kObserved };
+
+  typedef void (*MigrateFunction)(EvacuateVisitorBase* base, HeapObject* dst,
+                                  HeapObject* src, int size,
+                                  AllocationSpace dest);
+
+  template <MigrationMode mode>
+  static void RawMigrateObject(EvacuateVisitorBase* base, HeapObject* dst,
+                               HeapObject* src, int size,
+                               AllocationSpace dest) {
+    Address dst_addr = dst->address();
+    Address src_addr = src->address();
+    DCHECK(base->heap_->AllowedToBeMigrated(src, dest));
+    DCHECK(dest != LO_SPACE);
+    if (dest == OLD_SPACE) {
+      DCHECK_OBJECT_SIZE(size);
+      DCHECK(IsAligned(size, kPointerSize));
+      base->heap_->CopyBlock(dst_addr, src_addr, size);
+      if (mode != MigrationMode::kFast)
+        base->ExecuteMigrationObservers(dest, src, dst, size);
+      dst->IterateBodyFast(dst->map()->instance_type(), size,
+                           base->record_visitor_);
+    } else if (dest == CODE_SPACE) {
+      DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
+      base->heap_->CopyBlock(dst_addr, src_addr, size);
+      Code::cast(dst)->Relocate(dst_addr - src_addr);
+      if (mode != MigrationMode::kFast)
+        base->ExecuteMigrationObservers(dest, src, dst, size);
+      dst->IterateBodyFast(dst->map()->instance_type(), size,
+                           base->record_visitor_);
+    } else {
+      DCHECK_OBJECT_SIZE(size);
+      DCHECK(dest == NEW_SPACE);
+      base->heap_->CopyBlock(dst_addr, src_addr, size);
+      if (mode != MigrationMode::kFast)
+        base->ExecuteMigrationObservers(dest, src, dst, size);
+    }
+    base::NoBarrier_Store(reinterpret_cast<base::AtomicWord*>(src_addr),
+                          reinterpret_cast<base::AtomicWord>(dst_addr));
+  }
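
Note: RawMigrateObject is compiled twice via the MigrationMode template parameter: in the kFast instantiation the observer calls vanish at compile time, and migration_function_ selects the variant once instead of branching per object. A minimal standalone sketch of the same pattern, with hypothetical names and no V8 dependencies:

#include <cstdio>
#include <vector>

enum class Mode { kFast, kObserved };

struct Migrator {
  typedef void (*MigrateFn)(Migrator* self, int payload);

  template <Mode mode>
  static void RawMigrate(Migrator* self, int payload) {
    // ...copy work shared by both variants would go here...
    if (mode != Mode::kFast) {
      for (auto cb : self->observers_) cb(payload);
    }
  }

  void AddObserver(void (*cb)(int)) {
    migrate_fn_ = RawMigrate<Mode::kObserved>;  // flip variants exactly once
    observers_.push_back(cb);
  }

  void Migrate(int payload) { migrate_fn_(this, payload); }

  MigrateFn migrate_fn_ = RawMigrate<Mode::kFast>;
  std::vector<void (*)(int)> observers_;
};

int main() {
  Migrator m;
  m.Migrate(1);  // fast variant: observer loop compiled out
  m.AddObserver([](int p) { std::printf("moved payload %d\n", p); });
  m.Migrate(2);  // observed variant: callbacks fire
  return 0;
}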
 
   EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces,
                       RecordMigratedSlotVisitor* record_visitor)
       : heap_(heap),
         compaction_spaces_(compaction_spaces),
-        record_visitor_(record_visitor),
-        profiling_(
-            heap->isolate()->is_profiling() ||
-            heap->isolate()->logger()->is_logging_code_events() ||
-            heap->isolate()->heap_profiler()->is_tracking_object_moves()) {}
+        record_visitor_(record_visitor) {
+    migration_function_ = RawMigrateObject<MigrationMode::kFast>;
+  }
 
   inline bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
                                 HeapObject** target_object) {
 #ifdef VERIFY_HEAP
     if (AbortCompactionForTesting(object)) return false;
 #endif  // VERIFY_HEAP
     int size = object->Size();
     AllocationAlignment alignment = object->RequiredAlignment();
     AllocationResult allocation = target_space->AllocateRaw(size, alignment);
     if (allocation.To(target_object)) {
       MigrateObject(*target_object, object, size, target_space->identity());
       return true;
     }
     return false;
   }
 
+  inline void ExecuteMigrationObservers(AllocationSpace dest, HeapObject* src,
+                                        HeapObject* dst, int size) {
+    for (MigrationObserver* obs : observers_) {
+      obs->Move(dest, src, dst, size);
+    }
+  }
+
   inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
                             AllocationSpace dest) {
-    if (profiling_) {
-      MigrateObject<kProfiled>(dst, src, size, dest);
-    } else {
-      MigrateObject<kFast>(dst, src, size, dest);
-    }
-  }
-
-  template <MigrationMode mode>
-  inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
-                            AllocationSpace dest) {
-    Address dst_addr = dst->address();
-    Address src_addr = src->address();
-    DCHECK(heap_->AllowedToBeMigrated(src, dest));
-    DCHECK(dest != LO_SPACE);
-    if (dest == OLD_SPACE) {
-      DCHECK_OBJECT_SIZE(size);
-      DCHECK(IsAligned(size, kPointerSize));
-      heap_->CopyBlock(dst_addr, src_addr, size);
-      if ((mode == kProfiled) && dst->IsBytecodeArray()) {
-        PROFILE(heap_->isolate(),
-                CodeMoveEvent(AbstractCode::cast(src), dst_addr));
-      }
-      dst->IterateBodyFast(dst->map()->instance_type(), size, record_visitor_);
-    } else if (dest == CODE_SPACE) {
-      DCHECK_CODEOBJECT_SIZE(size, heap_->code_space());
-      if (mode == kProfiled) {
-        PROFILE(heap_->isolate(),
-                CodeMoveEvent(AbstractCode::cast(src), dst_addr));
-      }
-      heap_->CopyBlock(dst_addr, src_addr, size);
-      Code::cast(dst)->Relocate(dst_addr - src_addr);
-      RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
-      dst->IterateBodyFast(dst->map()->instance_type(), size, record_visitor_);
-    } else {
-      DCHECK_OBJECT_SIZE(size);
-      DCHECK(dest == NEW_SPACE);
-      heap_->CopyBlock(dst_addr, src_addr, size);
-    }
-    if (mode == kProfiled) {
-      heap_->OnMoveEvent(dst, src, size);
-    }
-    base::NoBarrier_Store(reinterpret_cast<base::AtomicWord*>(src_addr),
-                          reinterpret_cast<base::AtomicWord>(dst_addr));
+    migration_function_(this, dst, src, size, dest);
   }
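
Note: the two hand-written MigrateObject bodies above collapse into the single indirect call through migration_function_. Incidentally, the deleted kProfiled body carried an unused local in its CODE_SPACE branch (RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());) that the new RawMigrateObject drops.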
 
 #ifdef VERIFY_HEAP
   bool AbortCompactionForTesting(HeapObject* object) {
     if (FLAG_stress_compaction) {
       const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
                              Page::kPageAlignmentMask & ~kPointerAlignmentMask;
       if ((reinterpret_cast<uintptr_t>(object->address()) &
            Page::kPageAlignmentMask) == mask) {
         Page* page = Page::FromAddress(object->address());
         if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
           page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
         } else {
           page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
           return true;
         }
       }
     }
     return false;
   }
 #endif  // VERIFY_HEAP
 
   Heap* heap_;
   CompactionSpaceCollection* compaction_spaces_;
   RecordMigratedSlotVisitor* record_visitor_;
-  bool profiling_;
+  std::vector<MigrationObserver*> observers_;
+  MigrateFunction migration_function_;
 };
 
 class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
  public:
   static const intptr_t kLabSize = 4 * KB;
   static const intptr_t kMaxLabObjectSize = 256;
 
   explicit EvacuateNewSpaceVisitor(Heap* heap,
                                    CompactionSpaceCollection* compaction_spaces,
                                    RecordMigratedSlotVisitor* record_visitor,
(...skipping 1326 matching lines...)
                                &local_pretenuring_feedback_),
 
         old_space_visitor_(heap_, &compaction_spaces_, record_visitor),
         duration_(0.0),
         bytes_compacted_(0) {}
 
   virtual ~Evacuator() {}
 
   bool EvacuatePage(Page* page);
 
+  void AddObserver(MigrationObserver* observer) {
+    new_space_visitor_.AddObserver(observer);
+    old_space_visitor_.AddObserver(observer);
+  }
+
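
Note: Evacuator::AddObserver fans one registration out to both the new-space and old-space visitors, so a single observer sees moves into every target space handled by this evacuator.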
   // Merge back locally cached info sequentially. Note that this method needs
   // to be called from the main thread.
   inline void Finalize();
 
   CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
   AllocationInfo CloseNewSpaceLAB() { return new_space_visitor_.CloseLAB(); }
 
  protected:
   static const int kInitialLocalPretenuringFeedbackCapacity = 256;
 
(...skipping 177 matching lines...)
           // sweeper, which will happen after updating pointers.
           *data += 1;
         }
         break;
       default:
         UNREACHABLE();
     }
   }
 };
 
+template <class Evacuator, class Collector>
+void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
+    Collector* collector, PageParallelJob<EvacuationJobTraits>* job,
+    RecordMigratedSlotVisitor* record_visitor, const intptr_t live_bytes,
+    const int& abandoned_pages) {
+  // Used for trace summary.
+  double compaction_speed = 0;
+  if (FLAG_trace_evacuation) {
+    compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
+  }
+
+  const bool profiling =
+      heap()->isolate()->is_profiling() ||
+      heap()->isolate()->logger()->is_logging_code_events() ||
+      heap()->isolate()->heap_profiler()->is_tracking_object_moves();
+  ProfilingMigrationObserver profiling_observer(heap());
+
+  const int wanted_num_tasks =
+      NumberOfParallelCompactionTasks(job->NumberOfPages(), live_bytes);
+  Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
+  for (int i = 0; i < wanted_num_tasks; i++) {
+    evacuators[i] = new Evacuator(collector, record_visitor);
+    if (profiling) evacuators[i]->AddObserver(&profiling_observer);
+  }
+  job->Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; });
+  const Address top = heap()->new_space()->top();
+  for (int i = 0; i < wanted_num_tasks; i++) {
+    evacuators[i]->Finalize();
+    // Try to find the last LAB that was used for new space allocation in
+    // evacuation tasks. If it was adjacent to the current top, move top back.
+    const AllocationInfo info = evacuators[i]->CloseNewSpaceLAB();
+    if (info.limit() != nullptr && info.limit() == top) {
+      DCHECK_NOT_NULL(info.top());
+      *heap()->new_space()->allocation_top_address() = info.top();
+    }
+    delete evacuators[i];
+  }
+  delete[] evacuators;
+
+  if (FLAG_trace_evacuation) {
+    PrintIsolate(isolate(),
+                 "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
+                 "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS
+                 " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
+                 isolate()->time_millis_since_init(),
+                 FLAG_parallel_compaction ? "yes" : "no", job->NumberOfPages(),
+                 abandoned_pages, wanted_num_tasks, job->NumberOfTasks(),
+                 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
+                 live_bytes, compaction_speed);
+  }
+}
+
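
Note: the profiling check that each EvacuateVisitorBase constructor used to perform now happens once per GC cycle here, and the observer is attached only when the isolate is actually profiling, logging code events, or tracking object moves. The single stack-allocated profiling_observer is shared by all evacuators; this looks safe because job->Run does not return until the evacuation tasks have finished.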
 void MarkCompactCollector::EvacuatePagesInParallel() {
   PageParallelJob<EvacuationJobTraits> job(
       heap_, heap_->isolate()->cancelable_task_manager(),
       &page_parallel_job_semaphore_);
 
   int abandoned_pages = 0;
   intptr_t live_bytes = 0;
   for (Page* page : old_space_evacuation_pages_) {
     live_bytes += MarkingState::Internal(page).live_bytes();
     job.AddPage(page, &abandoned_pages);
(...skipping 12 matching lines...)
         EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
       } else {
         EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
       }
     }
 
     job.AddPage(page, &abandoned_pages);
   }
   DCHECK_GE(job.NumberOfPages(), 1);
 
-  // Used for trace summary.
-  double compaction_speed = 0;
-  if (FLAG_trace_evacuation) {
-    compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
-  }
-
-  const int wanted_num_tasks =
-      NumberOfParallelCompactionTasks(job.NumberOfPages(), live_bytes);
-  FullEvacuator** evacuators = new FullEvacuator*[wanted_num_tasks];
   RecordMigratedSlotVisitor record_visitor(this);
-  for (int i = 0; i < wanted_num_tasks; i++) {
-    evacuators[i] = new FullEvacuator(this, &record_visitor);
-  }
-  job.Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; });
-  const Address top = heap()->new_space()->top();
-  for (int i = 0; i < wanted_num_tasks; i++) {
-    evacuators[i]->Finalize();
-    // Try to find the last LAB that was used for new space allocation in
-    // evacuation tasks. If it was adjacent to the current top, move top back.
-    const AllocationInfo info = evacuators[i]->CloseNewSpaceLAB();
-    if (info.limit() != nullptr && info.limit() == top) {
-      DCHECK_NOT_NULL(info.top());
-      *heap()->new_space()->allocation_top_address() = info.top();
-    }
-    delete evacuators[i];
-  }
-  delete[] evacuators;
-
-  if (FLAG_trace_evacuation) {
-    PrintIsolate(isolate(),
-                 "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
-                 "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS
-                 " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
-                 isolate()->time_millis_since_init(),
-                 FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(),
-                 abandoned_pages, wanted_num_tasks, job.NumberOfTasks(),
-                 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
-                 live_bytes, compaction_speed);
-  }
+  CreateAndExecuteEvacuationTasks<FullEvacuator>(this, &job, &record_visitor,
+                                                 live_bytes, abandoned_pages);
 }
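
Note: EvacuatePagesInParallel now only gathers pages and delegates to CreateAndExecuteEvacuationTasks. Making the helper a template over the Evacuator and Collector types suggests it is intended to be shared with other evacuation variants beyond FullEvacuator (for example a future young-generation evacuator), rather than serving mark-compact alone.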
 
 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
  public:
   virtual Object* RetainAs(Object* object) {
     if (object->IsHeapObject()) {
       HeapObject* heap_object = HeapObject::cast(object);
       MapWord map_word = heap_object->map_word();
       if (map_word.IsForwardingAddress()) {
         return map_word.ToForwardingAddress();
(...skipping 726 matching lines...)
       // The target is always in old space, we don't have to record the slot in
       // the old-to-new remembered set.
       DCHECK(!heap()->InNewSpace(target));
       RecordRelocSlot(host, &rinfo, target);
     }
   }
 }
 
 }  // namespace internal
 }  // namespace v8
