Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
| 6 | 6 |
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
| 9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
| 10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
| (...skipping 454 matching lines...) | |
| 465 AllocationSpace space_to_start) | 465 AllocationSpace space_to_start) |
| 466 : sweeper_(sweeper), | 466 : sweeper_(sweeper), |
| 467 pending_sweeper_tasks_(pending_sweeper_tasks), | 467 pending_sweeper_tasks_(pending_sweeper_tasks), |
| 468 space_to_start_(space_to_start) {} | 468 space_to_start_(space_to_start) {} |
| 469 | 469 |
| 470 virtual ~SweeperTask() {} | 470 virtual ~SweeperTask() {} |
| 471 | 471 |
| 472 private: | 472 private: |
| 473 // v8::Task overrides. | 473 // v8::Task overrides. |
| 474 void Run() override { | 474 void Run() override { |
| 475 DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE); | 475 DCHECK_GE(space_to_start_, FIRST_SPACE); |
| 476 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE); | 476 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE); |
| 477 const int offset = space_to_start_ - FIRST_PAGED_SPACE; | 477 const int offset = space_to_start_ - FIRST_SPACE; |
| 478 const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; | 478 const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1; |
| 479 for (int i = 0; i < num_spaces; i++) { | 479 for (int i = 0; i < num_spaces; i++) { |
| 480 const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces); | 480 const int space_id = FIRST_SPACE + ((i + offset) % num_spaces); |
| 481 DCHECK_GE(space_id, FIRST_PAGED_SPACE); | 481 DCHECK_GE(space_id, FIRST_SPACE); |
| 482 DCHECK_LE(space_id, LAST_PAGED_SPACE); | 482 DCHECK_LE(space_id, LAST_PAGED_SPACE); |
| 483 sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0); | 483 sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0); |
| 484 } | 484 } |
| 485 pending_sweeper_tasks_->Signal(); | 485 pending_sweeper_tasks_->Signal(); |
| 486 } | 486 } |
| 487 | 487 |
| 488 Sweeper* sweeper_; | 488 Sweeper* sweeper_; |
| 489 base::Semaphore* pending_sweeper_tasks_; | 489 base::Semaphore* pending_sweeper_tasks_; |
| 490 AllocationSpace space_to_start_; | 490 AllocationSpace space_to_start_; |
| 491 | 491 |
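
Reader note: the hunk above widens the sweeper tasks from the paged spaces to the full `FIRST_SPACE..LAST_PAGED_SPACE` range, so NEW_SPACE is now swept concurrently too. A minimal standalone sketch of the round-robin rotation in `Run()`, using stand-in enum values rather than V8's actual `AllocationSpace`:

```cpp
#include <cstdio>

// Stand-in values; the real code uses v8::internal::AllocationSpace.
enum AllocationSpace {
  NEW_SPACE = 0,
  OLD_SPACE,
  CODE_SPACE,
  MAP_SPACE,
  FIRST_SPACE = NEW_SPACE,
  LAST_PAGED_SPACE = MAP_SPACE
};

void RunSweeperTask(AllocationSpace space_to_start) {
  const int offset = space_to_start - FIRST_SPACE;
  const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
  for (int i = 0; i < num_spaces; i++) {
    // Each task starts at its own space and wraps around, so concurrently
    // running tasks drain different sweeping lists before they collide.
    const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
    std::printf("task(start=%d) sweeps space %d\n", space_to_start, space_id);
  }
}

int main() { RunSweeperTask(CODE_SPACE); }
```
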
| (...skipping 17 matching lines...) | |
| 509 void MarkCompactCollector::Sweeper::StartSweepingHelper( | 509 void MarkCompactCollector::Sweeper::StartSweepingHelper( |
| 510 AllocationSpace space_to_start) { | 510 AllocationSpace space_to_start) { |
| 511 num_sweeping_tasks_++; | 511 num_sweeping_tasks_++; |
| 512 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 512 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
| 513 new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start), | 513 new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start), |
| 514 v8::Platform::kShortRunningTask); | 514 v8::Platform::kShortRunningTask); |
| 515 } | 515 } |
| 516 | 516 |
| 517 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted( | 517 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted( |
| 518 Page* page) { | 518 Page* page) { |
| 519 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); | |
| 520 if (!page->SweepingDone()) { | 519 if (!page->SweepingDone()) { |
| 521 ParallelSweepPage(page, owner); | 520 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); |
| 521 ParallelSweepPage(page, owner->identity()); | |
| 522 if (!page->SweepingDone()) { | 522 if (!page->SweepingDone()) { |
| 523 // We were not able to sweep that page, i.e., a concurrent | 523 // We were not able to sweep that page, i.e., a concurrent |
| 524 // sweeper thread currently owns this page. Wait for the sweeper | 524 // sweeper thread currently owns this page. Wait for the sweeper |
| 525 // thread to be done with this page. | 525 // thread to be done with this page. |
| 526 page->WaitUntilSweepingCompleted(); | 526 page->WaitUntilSweepingCompleted(); |
| 527 } | 527 } |
| 528 } | 528 } |
| 529 } | 529 } |
| 530 | 530 |
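
Reader note: `SweepOrWaitUntilSweepingCompleted` now derives the space identity inside the `!SweepingDone()` branch. The protocol it implements is "try to sweep the page on this thread, otherwise block until the concurrent sweeper releases it". A reduced sketch with stand-in types (the real code uses `Page::mutex()` and `concurrent_sweeping_state()`):

```cpp
#include <mutex>

struct Page {
  std::mutex mutex;
  bool sweeping_done = false;  // guarded by mutex in this sketch
};

void RawSweep(Page* page) { page->sweeping_done = true; }  // placeholder

void SweepOrWaitUntilSweepingCompleted(Page* page) {
  if (page->mutex.try_lock()) {
    // We won the race: sweep the page on this thread.
    if (!page->sweeping_done) RawSweep(page);
    page->mutex.unlock();
  } else {
    // A concurrent sweeper owns the page; blocking on the mutex is the
    // "wait until sweeping completed" step.
    std::lock_guard<std::mutex> wait(page->mutex);
  }
}

int main() {
  Page p;
  SweepOrWaitUntilSweepingCompleted(&p);
  return p.sweeping_done ? 0 : 1;
}
```
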
| 531 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) { | 531 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) { |
| (...skipping 22 matching lines...) | |
| 554 [this](AllocationSpace space) { ParallelSweepSpace(space, 0); }); | 554 [this](AllocationSpace space) { ParallelSweepSpace(space, 0); }); |
| 555 } | 555 } |
| 556 | 556 |
| 557 if (FLAG_concurrent_sweeping) { | 557 if (FLAG_concurrent_sweeping) { |
| 558 while (num_sweeping_tasks_ > 0) { | 558 while (num_sweeping_tasks_ > 0) { |
| 559 pending_sweeper_tasks_semaphore_.Wait(); | 559 pending_sweeper_tasks_semaphore_.Wait(); |
| 560 num_sweeping_tasks_--; | 560 num_sweeping_tasks_--; |
| 561 } | 561 } |
| 562 } | 562 } |
| 563 | 563 |
| 564 ForAllSweepingSpaces( | 564 ForAllSweepingSpaces([this](AllocationSpace space) { |
| 565 [this](AllocationSpace space) { DCHECK(sweeping_list_[space].empty()); }); | 565 if (space == NEW_SPACE) { |
| 566 swept_list_[NEW_SPACE].Clear(); | |
| 567 } | |
| 568 DCHECK(sweeping_list_[space].empty()); | |
| 569 }); | |
| 566 late_pages_ = false; | 570 late_pages_ = false; |
| 567 sweeping_in_progress_ = false; | 571 sweeping_in_progress_ = false; |
| 568 } | 572 } |
| 569 | 573 |
| 574 void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() { | |
| 575 if (!sweeping_in_progress_) return; | |
| 576 if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) { | |
| 577 NewSpacePageIterator pit(heap_->new_space()); | |
| 578 while (pit.has_next()) { | |
| 579 Page* page = pit.next(); | |
| 580 SweepOrWaitUntilSweepingCompleted(page); | |
| 581 } | |
| 582 } | |
| 583 } | |
| 584 | |
| 570 void MarkCompactCollector::EnsureSweepingCompleted() { | 585 void MarkCompactCollector::EnsureSweepingCompleted() { |
| 571 if (!sweeper().sweeping_in_progress()) return; | 586 if (!sweeper().sweeping_in_progress()) return; |
| 572 | 587 |
| 573 sweeper().EnsureCompleted(); | 588 sweeper().EnsureCompleted(); |
| 574 heap()->old_space()->RefillFreeList(); | 589 heap()->old_space()->RefillFreeList(); |
| 575 heap()->code_space()->RefillFreeList(); | 590 heap()->code_space()->RefillFreeList(); |
| 576 heap()->map_space()->RefillFreeList(); | 591 heap()->map_space()->RefillFreeList(); |
| 577 | 592 |
| 578 #ifdef VERIFY_HEAP | 593 #ifdef VERIFY_HEAP |
| 579 if (FLAG_verify_heap && !evacuation()) { | 594 if (FLAG_verify_heap && !evacuation()) { |
| (...skipping 1285 matching lines...) | |
| 1865 AllocationSpace space_to_allocate_; | 1880 AllocationSpace space_to_allocate_; |
| 1866 intptr_t promoted_size_; | 1881 intptr_t promoted_size_; |
| 1867 intptr_t semispace_copied_size_; | 1882 intptr_t semispace_copied_size_; |
| 1868 HashMap* local_pretenuring_feedback_; | 1883 HashMap* local_pretenuring_feedback_; |
| 1869 }; | 1884 }; |
| 1870 | 1885 |
| 1871 class MarkCompactCollector::EvacuateNewSpacePageVisitor final | 1886 class MarkCompactCollector::EvacuateNewSpacePageVisitor final |
| 1872 : public MarkCompactCollector::HeapObjectVisitor { | 1887 : public MarkCompactCollector::HeapObjectVisitor { |
| 1873 public: | 1888 public: |
| 1874 explicit EvacuateNewSpacePageVisitor(Heap* heap) | 1889 explicit EvacuateNewSpacePageVisitor(Heap* heap) |
| 1875 : heap_(heap), promoted_size_(0) {} | 1890 : heap_(heap), promoted_size_(0), semispace_copied_size_(0) {} |
| 1876 | 1891 |
| 1877 static void TryMoveToOldSpace(Page* page, PagedSpace* owner) { | 1892 static void TryMoveToOldSpace(Page* page, PagedSpace* owner) { |
| 1878 if (page->heap()->new_space()->ReplaceWithEmptyPage(page)) { | 1893 if (page->heap()->new_space()->ReplaceWithEmptyPage(page)) { |
| 1879 Page* new_page = Page::ConvertNewToOld(page, owner); | 1894 Page* new_page = Page::ConvertNewToOld(page, owner); |
| 1880 new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION); | 1895 new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION); |
| 1881 } | 1896 } |
| 1882 } | 1897 } |
| 1883 | 1898 |
| 1899 static void MoveToToSpace(Page* page, Space* owner) { | |
| 1900 page->heap()->new_space()->AddPageToToSpace(page); | |
| 1901 page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION); | |
| 1902 } | |
| 1903 | |
| 1884 inline bool Visit(HeapObject* object) { | 1904 inline bool Visit(HeapObject* object) { |
| 1885 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector()); | 1905 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector()); |
| 1886 object->IterateBodyFast(&visitor); | 1906 object->IterateBodyFast(&visitor); |
| 1887 promoted_size_ += object->Size(); | 1907 promoted_size_ += object->Size(); |
| 1888 return true; | 1908 return true; |
| 1889 } | 1909 } |
| 1890 | 1910 |
| 1891 intptr_t promoted_size() { return promoted_size_; } | 1911 intptr_t promoted_size() { return promoted_size_; } |
| 1912 intptr_t semispace_copied_size() { return semispace_copied_size_; } | |
| 1913 | |
| 1914 void account_semispace_copied(intptr_t copied) { | |
| 1915 semispace_copied_size_ += copied; | |
| 1916 } | |
| 1892 | 1917 |
| 1893 private: | 1918 private: |
| 1894 Heap* heap_; | 1919 Heap* heap_; |
| 1895 intptr_t promoted_size_; | 1920 intptr_t promoted_size_; |
| 1921 intptr_t semispace_copied_size_; | |
| 1896 }; | 1922 }; |
| 1897 | 1923 |
| 1898 class MarkCompactCollector::EvacuateOldSpaceVisitor final | 1924 class MarkCompactCollector::EvacuateOldSpaceVisitor final |
| 1899 : public MarkCompactCollector::EvacuateVisitorBase { | 1925 : public MarkCompactCollector::EvacuateVisitorBase { |
| 1900 public: | 1926 public: |
| 1901 EvacuateOldSpaceVisitor(Heap* heap, | 1927 EvacuateOldSpaceVisitor(Heap* heap, |
| 1902 CompactionSpaceCollection* compaction_spaces) | 1928 CompactionSpaceCollection* compaction_spaces) |
| 1903 : EvacuateVisitorBase(heap, compaction_spaces) {} | 1929 : EvacuateVisitorBase(heap, compaction_spaces) {} |
| 1904 | 1930 |
| 1905 inline bool Visit(HeapObject* object) override { | 1931 inline bool Visit(HeapObject* object) override { |
| (...skipping 1123 matching lines...) | |
| 3029 NewSpace* new_space = heap()->new_space(); | 3055 NewSpace* new_space = heap()->new_space(); |
| 3030 NewSpacePageIterator it(new_space->bottom(), new_space->top()); | 3056 NewSpacePageIterator it(new_space->bottom(), new_space->top()); |
| 3031 // Append the list of new space pages to be processed. | 3057 // Append the list of new space pages to be processed. |
| 3032 while (it.has_next()) { | 3058 while (it.has_next()) { |
| 3033 newspace_evacuation_candidates_.Add(it.next()); | 3059 newspace_evacuation_candidates_.Add(it.next()); |
| 3034 } | 3060 } |
| 3035 new_space->Flip(); | 3061 new_space->Flip(); |
| 3036 new_space->ResetAllocationInfo(); | 3062 new_space->ResetAllocationInfo(); |
| 3037 } | 3063 } |
| 3038 | 3064 |
| 3039 void MarkCompactCollector::EvacuateNewSpaceEpilogue() { | |
| 3040 newspace_evacuation_candidates_.Rewind(0); | |
| 3041 } | |
| 3042 | |
| 3043 class MarkCompactCollector::Evacuator : public Malloced { | 3065 class MarkCompactCollector::Evacuator : public Malloced { |
| 3044 public: | 3066 public: |
| 3067 enum EvacuationMode { | |
| 3068 kObjectsNewToOld, | |
| 3069 kPageNewToOld, | |
| 3070 kObjectsOldToOld, | |
| 3071 kPageNewToNew, | |
| 3072 }; | |
| 3073 | |
| 3074 static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) { | |
| 3075 // Note: The order of checks is important in this function. | |
| 3076 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION)) | |
| 3077 return kPageNewToOld; | |
| 3078 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION)) | |
| 3079 return kPageNewToNew; | |
| 3080 if (chunk->InNewSpace()) return kObjectsNewToOld; | |
| 3081 DCHECK(chunk->IsEvacuationCandidate()); | |
| 3082 return kObjectsOldToOld; | |
| 3083 } | |
| 3084 | |
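
Reader note: `ComputeEvacuationMode` moves to the public section and gains `kPageNewToNew`, and the check order is load-bearing. A new->new promoted page still answers `InNewSpace()` with true, so the promotion flags must be tested before the space test, or such a page would be misclassified as `kObjectsNewToOld`. A stand-in sketch:

```cpp
#include <cassert>

enum EvacuationMode { kObjectsNewToOld, kPageNewToOld, kObjectsOldToOld, kPageNewToNew };

struct Chunk {  // stand-in for MemoryChunk
  bool new_old_promotion = false;
  bool new_new_promotion = false;
  bool in_new_space = false;
  bool evacuation_candidate = false;
};

EvacuationMode ComputeEvacuationMode(const Chunk& chunk) {
  // Note: the order of checks is important.
  if (chunk.new_old_promotion) return kPageNewToOld;
  if (chunk.new_new_promotion) return kPageNewToNew;
  if (chunk.in_new_space) return kObjectsNewToOld;
  assert(chunk.evacuation_candidate);
  return kObjectsOldToOld;
}

int main() {
  Chunk c;
  c.in_new_space = c.new_new_promotion = true;  // a new->new promoted page
  assert(ComputeEvacuationMode(c) == kPageNewToNew);
}
```
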
| 3045 // NewSpacePages with more live bytes than this threshold qualify for fast | 3085 // NewSpacePages with more live bytes than this threshold qualify for fast |
| 3046 // evacuation. | 3086 // evacuation. |
| 3047 static int PageEvacuationThreshold() { | 3087 static int PageEvacuationThreshold() { |
| 3048 if (FLAG_page_promotion) | 3088 if (FLAG_page_promotion) |
| 3049 return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100; | 3089 return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100; |
| 3050 return Page::kAllocatableMemory + kPointerSize; | 3090 return Page::kAllocatableMemory + kPointerSize; |
| 3051 } | 3091 } |
| 3052 | 3092 |
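
Reader note: `PageEvacuationThreshold` gates whole-page promotion on live bytes; with `--page_promotion` off it returns one word more than a page can hold, which disables the feature. A back-of-the-envelope sketch with assumed constants (the real values come from `Page::kAllocatableMemory` and the flags):

```cpp
constexpr int kPointerSize = 8;
constexpr int kAllocatableMemory = 500 * 1024;  // assumed page payload size

int PageEvacuationThreshold(bool page_promotion, int threshold_percent) {
  if (page_promotion)
    return threshold_percent * kAllocatableMemory / 100;
  return kAllocatableMemory + kPointerSize;  // no page can reach this
}

int main() {
  // With an assumed 70% threshold a page needs > 358400 live bytes.
  return PageEvacuationThreshold(true, 70) == 358400 ? 0 : 1;
}
```
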
| 3053 explicit Evacuator(MarkCompactCollector* collector) | 3093 explicit Evacuator(MarkCompactCollector* collector) |
| 3054 : collector_(collector), | 3094 : collector_(collector), |
| 3055 compaction_spaces_(collector->heap()), | 3095 compaction_spaces_(collector->heap()), |
| 3056 local_pretenuring_feedback_(HashMap::PointersMatch, | 3096 local_pretenuring_feedback_(HashMap::PointersMatch, |
| 3057 kInitialLocalPretenuringFeedbackCapacity), | 3097 kInitialLocalPretenuringFeedbackCapacity), |
| 3058 new_space_visitor_(collector->heap(), &compaction_spaces_, | 3098 new_space_visitor_(collector->heap(), &compaction_spaces_, |
| 3059 &local_pretenuring_feedback_), | 3099 &local_pretenuring_feedback_), |
| 3060 new_space_page_visitor(collector->heap()), | 3100 new_space_page_visitor(collector->heap()), |
| 3061 old_space_visitor_(collector->heap(), &compaction_spaces_), | 3101 old_space_visitor_(collector->heap(), &compaction_spaces_), |
| 3062 duration_(0.0), | 3102 duration_(0.0), |
| 3063 bytes_compacted_(0) {} | 3103 bytes_compacted_(0) {} |
| 3064 | 3104 |
| 3065 inline bool EvacuatePage(Page* chunk); | 3105 inline bool EvacuatePage(Page* chunk); |
| 3066 | 3106 |
| 3067 // Merge back locally cached info sequentially. Note that this method needs | 3107 // Merge back locally cached info sequentially. Note that this method needs |
| 3068 // to be called from the main thread. | 3108 // to be called from the main thread. |
| 3069 inline void Finalize(); | 3109 inline void Finalize(); |
| 3070 | 3110 |
| 3071 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; } | 3111 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; } |
| 3072 | 3112 |
| 3073 private: | 3113 private: |
| 3074 enum EvacuationMode { | |
| 3075 kObjectsNewToOld, | |
| 3076 kPageNewToOld, | |
| 3077 kObjectsOldToOld, | |
| 3078 }; | |
| 3079 | |
| 3080 static const int kInitialLocalPretenuringFeedbackCapacity = 256; | 3114 static const int kInitialLocalPretenuringFeedbackCapacity = 256; |
| 3081 | 3115 |
| 3082 inline Heap* heap() { return collector_->heap(); } | 3116 inline Heap* heap() { return collector_->heap(); } |
| 3083 | 3117 |
| 3084 inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) { | |
| 3085 // Note: The order of checks is important in this function. | |
| 3086 if (chunk->InNewSpace()) return kObjectsNewToOld; | |
| 3087 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION)) | |
| 3088 return kPageNewToOld; | |
| 3089 DCHECK(chunk->IsEvacuationCandidate()); | |
| 3090 return kObjectsOldToOld; | |
| 3091 } | |
| 3092 | |
| 3093 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) { | 3118 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) { |
| 3094 duration_ += duration; | 3119 duration_ += duration; |
| 3095 bytes_compacted_ += bytes_compacted; | 3120 bytes_compacted_ += bytes_compacted; |
| 3096 } | 3121 } |
| 3097 | 3122 |
| 3098 template <IterationMode mode, class Visitor> | |
| 3099 inline bool EvacuateSinglePage(Page* p, Visitor* visitor); | |
| 3100 | |
| 3101 MarkCompactCollector* collector_; | 3123 MarkCompactCollector* collector_; |
| 3102 | 3124 |
| 3103 // Locally cached collector data. | 3125 // Locally cached collector data. |
| 3104 CompactionSpaceCollection compaction_spaces_; | 3126 CompactionSpaceCollection compaction_spaces_; |
| 3105 HashMap local_pretenuring_feedback_; | 3127 HashMap local_pretenuring_feedback_; |
| 3106 | 3128 |
| 3107 // Visitors for the corresponding spaces. | 3129 // Visitors for the corresponding spaces. |
| 3108 EvacuateNewSpaceVisitor new_space_visitor_; | 3130 EvacuateNewSpaceVisitor new_space_visitor_; |
| 3109 EvacuateNewSpacePageVisitor new_space_page_visitor; | 3131 EvacuateNewSpacePageVisitor new_space_page_visitor; |
| 3110 EvacuateOldSpaceVisitor old_space_visitor_; | 3132 EvacuateOldSpaceVisitor old_space_visitor_; |
| 3111 | 3133 |
| 3112 // Bookkeeping info. | 3134 // Bookkeeping info. |
| 3113 double duration_; | 3135 double duration_; |
| 3114 intptr_t bytes_compacted_; | 3136 intptr_t bytes_compacted_; |
| 3115 }; | 3137 }; |
| 3116 | 3138 |
| 3117 template <MarkCompactCollector::IterationMode mode, class Visitor> | 3139 bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) { |
| 3118 bool MarkCompactCollector::Evacuator::EvacuateSinglePage(Page* p, | |
| 3119 Visitor* visitor) { | |
| 3120 bool success = false; | 3140 bool success = false; |
| 3121 DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() || | 3141 DCHECK(page->SweepingDone()); |
| 3122 p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)); | 3142 int saved_live_bytes = page->LiveBytes(); |
| 3123 int saved_live_bytes = p->LiveBytes(); | 3143 double evacuation_time = 0.0; |
| 3124 double evacuation_time; | 3144 Heap* heap = page->heap(); |
| 3125 { | 3145 { |
| 3126 AlwaysAllocateScope always_allocate(heap()->isolate()); | 3146 AlwaysAllocateScope always_allocate(heap->isolate()); |
| 3127 TimedScope timed_scope(&evacuation_time); | 3147 TimedScope timed_scope(&evacuation_time); |
| 3128 success = collector_->VisitLiveObjects<Visitor>(p, visitor, mode); | 3148 switch (ComputeEvacuationMode(page)) { |
| 3149 case kObjectsNewToOld: | |
| 3150 success = collector_->VisitLiveObjects(page, &new_space_visitor_, | |
| 3151 kClearMarkbits); | |
| 3152 ArrayBufferTracker::ProcessBuffers( | |
| 3153 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); | |
| 3154 DCHECK(success); | |
| 3155 break; | |
| 3156 case kPageNewToOld: | |
| 3157 success = collector_->VisitLiveObjects(page, &new_space_page_visitor, | |
| 3158 kKeepMarking); | |
| 3159 // ArrayBufferTracker will be updated during sweeping. | |
| 3160 DCHECK(success); | |
| 3161 break; | |
| 3162 case kPageNewToNew: | |
| 3163 new_space_page_visitor.account_semispace_copied(page->LiveBytes()); | |
| 3164 // ArrayBufferTracker will be updated during sweeping. | |
| 3165 success = true; | |
| 3166 break; | |
| 3167 case kObjectsOldToOld: | |
| 3168 success = collector_->VisitLiveObjects(page, &old_space_visitor_, | |
| 3169 kClearMarkbits); | |
| 3170 if (!success) { | |
| 3171 // Aborted compaction page. We have to record slots here, since we | |
| 3172 // might not have recorded them in the first place. | |
| 3173 // Note: We mark the page as aborted here to be able to record slots | |
| 3174 // for code objects in |RecordMigratedSlotVisitor|. | |
| 3175 page->SetFlag(Page::COMPACTION_WAS_ABORTED); | |
| 3176 EvacuateRecordOnlyVisitor record_visitor(collector_->heap()); | |
| 3177 success = | |
| 3178 collector_->VisitLiveObjects(page, &record_visitor, kKeepMarking); | |
| 3179 ArrayBufferTracker::ProcessBuffers( | |
| 3180 page, ArrayBufferTracker::kUpdateForwardedKeepOthers); | |
| 3181 DCHECK(success); | |
| 3182 // We need to return failure here to indicate that we want this page | |
| 3183 // added to the sweeper. | |
| 3184 success = false; | |
| 3185 } else { | |
| 3186 ArrayBufferTracker::ProcessBuffers( | |
| 3187 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); | |
| 3188 } | |
| 3189 break; | |
| 3190 default: | |
| 3191 UNREACHABLE(); | |
| 3192 } | |
| 3129 } | 3193 } |
| 3194 ReportCompactionProgress(evacuation_time, saved_live_bytes); | |
| 3130 if (FLAG_trace_evacuation) { | 3195 if (FLAG_trace_evacuation) { |
| 3131 const char age_mark_tag = | 3196 PrintIsolate(heap->isolate(), |
| 3132 !p->InNewSpace() | 3197 "evacuation[%p]: page=%p new_space=%d " |
| 3133 ? 'x' | 3198 "page_evacuation=%d executable=%d contains_age_mark=%d " |
| 3134 : !p->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) | 3199 "live_bytes=%d time=%f\n", |
| 3135 ? '>' | 3200 static_cast<void*>(this), static_cast<void*>(page), |
| 3136 : !p->ContainsLimit(heap()->new_space()->age_mark()) ? '<' | 3201 page->InNewSpace(), |
| 3137 : '#'; | 3202 page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) || |
| 3138 PrintIsolate(heap()->isolate(), | 3203 page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION), |
| 3139 "evacuation[%p]: page=%p new_space=%d age_mark_tag=%c " | 3204 page->IsFlagSet(MemoryChunk::IS_EXECUTABLE), |
| 3140 "page_evacuation=%d executable=%d live_bytes=%d time=%f\n", | 3205 page->Contains(heap->new_space()->age_mark()), |
| 3141 static_cast<void*>(this), static_cast<void*>(p), | 3206 saved_live_bytes, evacuation_time); |
| 3142 p->InNewSpace(), age_mark_tag, | |
| 3143 p->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION), | |
| 3144 p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes, | |
| 3145 evacuation_time); | |
| 3146 } | |
| 3147 if (success) { | |
| 3148 ReportCompactionProgress(evacuation_time, saved_live_bytes); | |
| 3149 } | 3207 } |
| 3150 return success; | 3208 return success; |
| 3151 } | 3209 } |
| 3152 | 3210 |
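
Reader note: in the rewritten `EvacuatePage` above, `kPageNewToNew` is the interesting case: no object is visited or copied. The visitor only accounts the page's live bytes as semispace-copied, and the page becomes iterable again once the sweeper processes it. A reduced dispatch sketch with stand-in types:

```cpp
#include <cstdint>

enum EvacuationMode { kObjectsNewToOld, kPageNewToOld, kObjectsOldToOld, kPageNewToNew };

struct Page { int live_bytes = 0; };

struct NewSpacePageVisitor {
  intptr_t semispace_copied_size = 0;
  void AccountSemispaceCopied(intptr_t copied) {
    semispace_copied_size += copied;
  }
};

bool EvacuatePage(Page* page, EvacuationMode mode, NewSpacePageVisitor* v) {
  switch (mode) {
    case kObjectsNewToOld:
    case kPageNewToOld:
    case kObjectsOldToOld:
      return true;  // object visits elided in this sketch
    case kPageNewToNew:
      v->AccountSemispaceCopied(page->live_bytes);
      return true;  // sweeping makes the page iterable afterwards
  }
  return false;
}

int main() {
  Page p{4096};
  NewSpacePageVisitor v;
  EvacuatePage(&p, kPageNewToNew, &v);
  return v.semispace_copied_size == 4096 ? 0 : 1;
}
```
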
| 3153 bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) { | |
| 3154 bool result = false; | |
| 3155 DCHECK(page->SweepingDone()); | |
| 3156 switch (ComputeEvacuationMode(page)) { | |
| 3157 case kObjectsNewToOld: | |
| 3158 result = EvacuateSinglePage<kClearMarkbits>(page, &new_space_visitor_); | |
| 3159 ArrayBufferTracker::ProcessBuffers( | |
| 3160 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); | |
| 3161 DCHECK(result); | |
| 3162 USE(result); | |
| 3163 break; | |
| 3164 case kPageNewToOld: | |
| 3165 result = EvacuateSinglePage<kKeepMarking>(page, &new_space_page_visitor); | |
| 3166 // ArrayBufferTracker will be updated during sweeping. | |
| 3167 DCHECK(result); | |
| 3168 USE(result); | |
| 3169 break; | |
| 3170 case kObjectsOldToOld: | |
| 3171 result = EvacuateSinglePage<kClearMarkbits>(page, &old_space_visitor_); | |
| 3172 if (!result) { | |
| 3173 // Aborted compaction page. We have to record slots here, since we might | |
| 3174 // not have recorded them in the first place. | |
| 3175 // Note: We mark the page as aborted here to be able to record slots | |
| 3176 // for code objects in |RecordMigratedSlotVisitor|. | |
| 3177 page->SetFlag(Page::COMPACTION_WAS_ABORTED); | |
| 3178 EvacuateRecordOnlyVisitor record_visitor(collector_->heap()); | |
| 3179 result = EvacuateSinglePage<kKeepMarking>(page, &record_visitor); | |
| 3180 ArrayBufferTracker::ProcessBuffers( | |
| 3181 page, ArrayBufferTracker::kUpdateForwardedKeepOthers); | |
| 3182 DCHECK(result); | |
| 3183 USE(result); | |
| 3184 // We need to return failure here to indicate that we want this page | |
| 3185 // added to the sweeper. | |
| 3186 return false; | |
| 3187 } | |
| 3188 ArrayBufferTracker::ProcessBuffers( | |
| 3189 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); | |
| 3190 | |
| 3191 break; | |
| 3192 default: | |
| 3193 UNREACHABLE(); | |
| 3194 } | |
| 3195 return result; | |
| 3196 } | |
| 3197 | |
| 3198 void MarkCompactCollector::Evacuator::Finalize() { | 3211 void MarkCompactCollector::Evacuator::Finalize() { |
| 3199 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE)); | 3212 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE)); |
| 3200 heap()->code_space()->MergeCompactionSpace( | 3213 heap()->code_space()->MergeCompactionSpace( |
| 3201 compaction_spaces_.Get(CODE_SPACE)); | 3214 compaction_spaces_.Get(CODE_SPACE)); |
| 3202 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_); | 3215 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_); |
| 3203 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() + | 3216 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() + |
| 3204 new_space_page_visitor.promoted_size()); | 3217 new_space_page_visitor.promoted_size()); |
| 3205 heap()->IncrementSemiSpaceCopiedObjectSize( | 3218 heap()->IncrementSemiSpaceCopiedObjectSize( |
| 3206 new_space_visitor_.semispace_copied_size()); | 3219 new_space_visitor_.semispace_copied_size() + |
| 3220 new_space_page_visitor.semispace_copied_size()); | |
| 3207 heap()->IncrementYoungSurvivorsCounter( | 3221 heap()->IncrementYoungSurvivorsCounter( |
| 3208 new_space_visitor_.promoted_size() + | 3222 new_space_visitor_.promoted_size() + |
| 3209 new_space_visitor_.semispace_copied_size() + | 3223 new_space_visitor_.semispace_copied_size() + |
| 3210 new_space_page_visitor.promoted_size()); | 3224 new_space_page_visitor.promoted_size() + |
| 3225 new_space_page_visitor.semispace_copied_size()); | |
| 3211 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_); | 3226 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_); |
| 3212 } | 3227 } |
| 3213 | 3228 |
| 3214 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages, | 3229 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages, |
| 3215 intptr_t live_bytes) { | 3230 intptr_t live_bytes) { |
| 3216 if (!FLAG_parallel_compaction) return 1; | 3231 if (!FLAG_parallel_compaction) return 1; |
| 3217 // Compute the number of needed tasks based on a target compaction time, the | 3232 // Compute the number of needed tasks based on a target compaction time, the |
| 3218 // profiled compaction speed and marked live memory. | 3233 // profiled compaction speed and marked live memory. |
| 3219 // | 3234 // |
| 3220 // The number of parallel compaction tasks is limited by: | 3235 // The number of parallel compaction tasks is limited by: |
| (...skipping 25 matching lines...) | |
| 3246 typedef int* PerPageData; // Pointer to number of aborted pages. | 3261 typedef int* PerPageData; // Pointer to number of aborted pages. |
| 3247 typedef MarkCompactCollector::Evacuator* PerTaskData; | 3262 typedef MarkCompactCollector::Evacuator* PerTaskData; |
| 3248 | 3263 |
| 3249 static const bool NeedSequentialFinalization = true; | 3264 static const bool NeedSequentialFinalization = true; |
| 3250 | 3265 |
| 3251 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, | 3266 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, |
| 3252 MemoryChunk* chunk, PerPageData) { | 3267 MemoryChunk* chunk, PerPageData) { |
| 3253 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk)); | 3268 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk)); |
| 3254 } | 3269 } |
| 3255 | 3270 |
| 3256 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, | 3271 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, |

Michael Lippautz (2016/06/09 09:21:22): The plan (followup) is to somehow get rid of the a…

| 3257 bool success, PerPageData data) { | 3272 bool success, PerPageData data) { |
| 3258 if (chunk->InNewSpace()) { | 3273 using Evacuator = MarkCompactCollector::Evacuator; |
| 3259 DCHECK(success); | 3274 Page* p = static_cast<Page*>(chunk); |
| 3260 } else if (chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) { | 3275 switch (Evacuator::ComputeEvacuationMode(p)) { |
| 3261 DCHECK(success); | 3276 case Evacuator::kPageNewToOld: |
| 3262 Page* p = static_cast<Page*>(chunk); | 3277 break; |
| 3263 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION); | 3278 case Evacuator::kPageNewToNew: |
| 3264 p->ForAllFreeListCategories( | 3279 DCHECK(success); |
| 3265 [](FreeListCategory* category) { DCHECK(!category->is_linked()); }); | 3280 break; |
| 3266 heap->mark_compact_collector()->sweeper().AddLatePage( | 3281 case Evacuator::kObjectsNewToOld: |
| 3267 p->owner()->identity(), p); | 3282 DCHECK(success); |
| 3268 } else { | 3283 break; |
| 3269 Page* p = static_cast<Page*>(chunk); | 3284 case Evacuator::kObjectsOldToOld: |
| 3270 if (success) { | 3285 if (success) { |
| 3271 DCHECK(p->IsEvacuationCandidate()); | 3286 DCHECK(p->IsEvacuationCandidate()); |
| 3272 DCHECK(p->SweepingDone()); | 3287 DCHECK(p->SweepingDone()); |
| 3273 p->Unlink(); | 3288 p->Unlink(); |
| 3274 } else { | 3289 } else { |
| 3275 // We have partially compacted the page, i.e., some objects may have | 3290 // We have partially compacted the page, i.e., some objects may have |
| 3276 // moved, others are still in place. | 3291 // moved, others are still in place. |
| 3277 p->ClearEvacuationCandidate(); | 3292 p->ClearEvacuationCandidate(); |
| 3278 // Slots have already been recorded so we just need to add it to the | 3293 // Slots have already been recorded so we just need to add it to the |
| 3279 // sweeper. | 3294 // sweeper, which will happen after updating pointers. |
| 3280 *data += 1; | 3295 *data += 1; |
| 3281 } | 3296 } |
| 3297 break; | |
| 3298 default: | |
| 3299 UNREACHABLE(); | |
| 3282 } | 3300 } |
| 3283 } | 3301 } |
| 3284 }; | 3302 }; |
| 3285 | 3303 |
| 3286 void MarkCompactCollector::EvacuatePagesInParallel() { | 3304 void MarkCompactCollector::EvacuatePagesInParallel() { |
| 3287 PageParallelJob<EvacuationJobTraits> job( | 3305 PageParallelJob<EvacuationJobTraits> job( |
| 3288 heap_, heap_->isolate()->cancelable_task_manager(), | 3306 heap_, heap_->isolate()->cancelable_task_manager(), |
| 3289 &page_parallel_job_semaphore_); | 3307 &page_parallel_job_semaphore_); |
| 3290 | 3308 |
| 3291 int abandoned_pages = 0; | 3309 int abandoned_pages = 0; |
| 3292 intptr_t live_bytes = 0; | 3310 intptr_t live_bytes = 0; |
| 3293 for (Page* page : evacuation_candidates_) { | 3311 for (Page* page : evacuation_candidates_) { |
| 3294 live_bytes += page->LiveBytes(); | 3312 live_bytes += page->LiveBytes(); |
| 3295 job.AddPage(page, &abandoned_pages); | 3313 job.AddPage(page, &abandoned_pages); |
| 3296 } | 3314 } |
| 3297 | 3315 |
| 3298 const Address age_mark = heap()->new_space()->age_mark(); | 3316 const Address age_mark = heap()->new_space()->age_mark(); |
| 3299 for (Page* page : newspace_evacuation_candidates_) { | 3317 for (Page* page : newspace_evacuation_candidates_) { |
| 3300 live_bytes += page->LiveBytes(); | 3318 live_bytes += page->LiveBytes(); |
| 3301 if (!page->NeverEvacuate() && | 3319 if (!page->NeverEvacuate() && |
| 3302 (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) && | 3320 (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) && |
| 3303 page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) && | |
| 3304 !page->Contains(age_mark)) { | 3321 !page->Contains(age_mark)) { |
| 3305 EvacuateNewSpacePageVisitor::TryMoveToOldSpace(page, heap()->old_space()); | 3322 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) { |
| 3323 EvacuateNewSpacePageVisitor::TryMoveToOldSpace(page, | |
| 3324 heap()->old_space()); | |
| 3325 } else { | |
| 3326 EvacuateNewSpacePageVisitor::MoveToToSpace(page, heap()->new_space()); | |
| 3327 } | |
| 3306 } | 3328 } |
| 3329 | |
| 3307 job.AddPage(page, &abandoned_pages); | 3330 job.AddPage(page, &abandoned_pages); |
| 3308 } | 3331 } |
| 3309 DCHECK_GE(job.NumberOfPages(), 1); | 3332 DCHECK_GE(job.NumberOfPages(), 1); |
| 3310 | 3333 |
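
Reader note: the hunk above splits page promotion by the age mark. Pages over the live-byte threshold that sit entirely below the age mark have survived at least one scavenge and move to old space; otherwise they stay in new space as new->new promoted pages. A decision-table sketch with stand-in predicates:

```cpp
enum class PageEvacuation { kObjects, kNewToOld, kNewToNew };

PageEvacuation Decide(int live_bytes, int threshold, bool never_evacuate,
                      bool below_age_mark, bool contains_age_mark) {
  if (never_evacuate || live_bytes <= threshold || contains_age_mark)
    return PageEvacuation::kObjects;  // copy surviving objects one by one
  return below_age_mark ? PageEvacuation::kNewToOld
                        : PageEvacuation::kNewToNew;
}

int main() {
  // A hot page (live bytes over the threshold) not yet below the age mark:
  return Decide(460800, 358400, false, false, false) ==
                 PageEvacuation::kNewToNew
             ? 0
             : 1;
}
```
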
| 3311 // Used for trace summary. | 3334 // Used for trace summary. |
| 3312 double compaction_speed = 0; | 3335 double compaction_speed = 0; |
| 3313 if (FLAG_trace_evacuation) { | 3336 if (FLAG_trace_evacuation) { |
| 3314 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | 3337 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); |
| 3315 } | 3338 } |
| 3316 | 3339 |
| (...skipping 33 matching lines...) | |
| 3350 return map_word.ToForwardingAddress(); | 3373 return map_word.ToForwardingAddress(); |
| 3351 } | 3374 } |
| 3352 } | 3375 } |
| 3353 return object; | 3376 return object; |
| 3354 } | 3377 } |
| 3355 }; | 3378 }; |
| 3356 | 3379 |
| 3357 template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode, | 3380 template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode, |
| 3358 MarkCompactCollector::Sweeper::SweepingParallelism parallelism, | 3381 MarkCompactCollector::Sweeper::SweepingParallelism parallelism, |
| 3359 MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode, | 3382 MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode, |
| 3383 MarkCompactCollector::Sweeper::FreeListRebuildingMode free_list_mode, | |
| 3360 MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode> | 3384 MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode> |
| 3361 int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p, | 3385 int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p, |
| 3362 ObjectVisitor* v) { | 3386 ObjectVisitor* v) { |
| 3363 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone()); | 3387 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone()); |
| 3364 DCHECK(!p->IsFlagSet(Page::BLACK_PAGE)); | 3388 DCHECK(!p->IsFlagSet(Page::BLACK_PAGE)); |
| 3365 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST, | 3389 DCHECK((space == nullptr) || (space->identity() != CODE_SPACE) || |
| 3366 space->identity() == CODE_SPACE); | 3390 (skip_list_mode == REBUILD_SKIP_LIST)); |
| 3367 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); | 3391 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); |
| 3368 DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY); | 3392 DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY); |
| 3369 | 3393 |
| 3370 // Before we sweep objects on the page, we free dead array buffers which | 3394 // Before we sweep objects on the page, we free dead array buffers which |
| 3371 // requires valid mark bits. | 3395 // requires valid mark bits. |
| 3372 ArrayBufferTracker::FreeDead(p); | 3396 ArrayBufferTracker::FreeDead(p); |
| 3373 | 3397 |
| 3374 Address free_start = p->area_start(); | 3398 Address free_start = p->area_start(); |
| 3375 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); | 3399 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); |
| 3376 | 3400 |
| (...skipping 12 matching lines...) Expand all Loading... | |
| 3389 LiveObjectIterator<kBlackObjects> it(p); | 3413 LiveObjectIterator<kBlackObjects> it(p); |
| 3390 HeapObject* object = NULL; | 3414 HeapObject* object = NULL; |
| 3391 while ((object = it.Next()) != NULL) { | 3415 while ((object = it.Next()) != NULL) { |
| 3392 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); | 3416 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); |
| 3393 Address free_end = object->address(); | 3417 Address free_end = object->address(); |
| 3394 if (free_end != free_start) { | 3418 if (free_end != free_start) { |
| 3395 int size = static_cast<int>(free_end - free_start); | 3419 int size = static_cast<int>(free_end - free_start); |
| 3396 if (free_space_mode == ZAP_FREE_SPACE) { | 3420 if (free_space_mode == ZAP_FREE_SPACE) { |
| 3397 memset(free_start, 0xcc, size); | 3421 memset(free_start, 0xcc, size); |
| 3398 } | 3422 } |
| 3399 freed_bytes = space->UnaccountedFree(free_start, size); | 3423 if (free_list_mode == REBUILD_FREE_LIST) { |
| 3400 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | 3424 freed_bytes = space->UnaccountedFree(free_start, size); |
| 3425 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | |
| 3426 } else { | |
| 3427 p->heap()->CreateFillerObjectAt(free_start, size, | |
| 3428 ClearRecordedSlots::kNo); | |
| 3429 } | |
| 3401 } | 3430 } |
| 3402 Map* map = object->synchronized_map(); | 3431 Map* map = object->synchronized_map(); |
| 3403 int size = object->SizeFromMap(map); | 3432 int size = object->SizeFromMap(map); |
| 3404 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) { | 3433 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) { |
| 3405 object->IterateBody(map->instance_type(), size, v); | 3434 object->IterateBody(map->instance_type(), size, v); |
| 3406 } | 3435 } |
| 3407 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) { | 3436 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) { |
| 3408 int new_region_start = SkipList::RegionNumber(free_end); | 3437 int new_region_start = SkipList::RegionNumber(free_end); |
| 3409 int new_region_end = | 3438 int new_region_end = |
| 3410 SkipList::RegionNumber(free_end + size - kPointerSize); | 3439 SkipList::RegionNumber(free_end + size - kPointerSize); |
| 3411 if (new_region_start != curr_region || new_region_end != curr_region) { | 3440 if (new_region_start != curr_region || new_region_end != curr_region) { |
| 3412 skip_list->AddObject(free_end, size); | 3441 skip_list->AddObject(free_end, size); |
| 3413 curr_region = new_region_end; | 3442 curr_region = new_region_end; |
| 3414 } | 3443 } |
| 3415 } | 3444 } |
| 3416 free_start = free_end + size; | 3445 free_start = free_end + size; |
| 3417 } | 3446 } |
| 3418 | 3447 |
| 3419 // Clear the mark bits of that page and reset live bytes count. | 3448 // Clear the mark bits of that page and reset live bytes count. |
| 3420 Bitmap::Clear(p); | 3449 Bitmap::Clear(p); |
| 3421 | 3450 |
| 3422 if (free_start != p->area_end()) { | 3451 if (free_start != p->area_end()) { |
| 3423 int size = static_cast<int>(p->area_end() - free_start); | 3452 int size = static_cast<int>(p->area_end() - free_start); |
| 3424 if (free_space_mode == ZAP_FREE_SPACE) { | 3453 if (free_space_mode == ZAP_FREE_SPACE) { |
| 3425 memset(free_start, 0xcc, size); | 3454 memset(free_start, 0xcc, size); |
| 3426 } | 3455 } |
| 3427 freed_bytes = space->UnaccountedFree(free_start, size); | 3456 if (free_list_mode == REBUILD_FREE_LIST) { |
| 3428 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | 3457 freed_bytes = space->UnaccountedFree(free_start, size); |
| 3458 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | |
| 3459 } else { | |
| 3460 p->heap()->CreateFillerObjectAt(free_start, size, | |
| 3461 ClearRecordedSlots::kNo); | |
| 3462 } | |
| 3429 } | 3463 } |
| 3430 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); | 3464 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); |
| 3465 if (free_list_mode == IGNORE_FREE_LIST) return 0; | |
| 3431 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); | 3466 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); |
| 3432 } | 3467 } |
| 3433 | 3468 |
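
Reader note: the new `free_list_mode` template parameter is what lets `RawSweep` run over new-space pages, which have no free list. Dead ranges there are turned into filler objects so the page stays linearly iterable, and the function reports zero usable free bytes. A stand-in sketch of the split:

```cpp
#include <algorithm>
#include <cstring>

enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };

struct Space {
  int UnaccountedFree(char* /*start*/, int size) { return size; }
};

void CreateFillerObjectAt(char* start, int size) {
  std::memset(start, 0, size);  // placeholder for writing a filler map word
}

int HandleDeadRange(FreeListRebuildingMode mode, Space* space, char* start,
                    int size, int max_freed_bytes) {
  if (mode == REBUILD_FREE_LIST) {
    // Paged spaces: feed the range into the (unaccounted) free list.
    return std::max(space->UnaccountedFree(start, size), max_freed_bytes);
  }
  // New space: just keep the page iterable.
  CreateFillerObjectAt(start, size);
  return max_freed_bytes;
}

int main() {
  Space s;
  char buf[64];
  return HandleDeadRange(IGNORE_FREE_LIST, &s, buf, sizeof(buf), 0) == 0 ? 0 : 1;
}
```
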
| 3434 void MarkCompactCollector::InvalidateCode(Code* code) { | 3469 void MarkCompactCollector::InvalidateCode(Code* code) { |
| 3435 if (heap_->incremental_marking()->IsCompacting() && | 3470 if (heap_->incremental_marking()->IsCompacting() && |
| 3436 !ShouldSkipEvacuationSlotRecording(code)) { | 3471 !ShouldSkipEvacuationSlotRecording(code)) { |
| 3437 DCHECK(compacting_); | 3472 DCHECK(compacting_); |
| 3438 | 3473 |
| 3439 // If the object is white then no slots were recorded on it yet. | 3474 // If the object is white then no slots were recorded on it yet. |
| 3440 MarkBit mark_bit = Marking::MarkBitFrom(code); | 3475 MarkBit mark_bit = Marking::MarkBitFrom(code); |
| (...skipping 95 matching lines...) | |
| 3536 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { | 3571 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { |
| 3537 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); | 3572 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); |
| 3538 Heap::RelocationLock relocation_lock(heap()); | 3573 Heap::RelocationLock relocation_lock(heap()); |
| 3539 | 3574 |
| 3540 { | 3575 { |
| 3541 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); | 3576 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); |
| 3542 EvacuationScope evacuation_scope(this); | 3577 EvacuationScope evacuation_scope(this); |
| 3543 | 3578 |
| 3544 EvacuateNewSpacePrologue(); | 3579 EvacuateNewSpacePrologue(); |
| 3545 EvacuatePagesInParallel(); | 3580 EvacuatePagesInParallel(); |
| 3546 EvacuateNewSpaceEpilogue(); | |
| 3547 heap()->new_space()->set_age_mark(heap()->new_space()->top()); | 3581 heap()->new_space()->set_age_mark(heap()->new_space()->top()); |
| 3548 } | 3582 } |
| 3549 | 3583 |
| 3550 UpdatePointersAfterEvacuation(); | 3584 UpdatePointersAfterEvacuation(); |
| 3551 | 3585 |
| 3552 // Give pages that are queued to be freed back to the OS. Note that filtering | 3586 // Give pages that are queued to be freed back to the OS. Note that filtering |
| 3553 // slots only handles old space (for unboxed doubles), and thus map space can | 3587 // slots only handles old space (for unboxed doubles), and thus map space can |
| 3554 // still contain stale pointers. We only free the chunks after pointer updates | 3588 // still contain stale pointers. We only free the chunks after pointer updates |
| 3555 // to still have access to page headers. | 3589 // to still have access to page headers. |
| 3556 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); | 3590 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); |
| 3557 | 3591 |
| 3558 { | 3592 { |
| 3559 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); | 3593 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); |
| 3560 | 3594 |
| 3595 for (Page* p : newspace_evacuation_candidates_) { | |
| 3596 if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { | |
| 3597 p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION); | |
| 3598 sweeper().AddLatePage(p->owner()->identity(), p); | |
| 3599 } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) { | |
| 3600 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION); | |
| 3601 p->ForAllFreeListCategories( | |
| 3602 [](FreeListCategory* category) { DCHECK(!category->is_linked()); }); | |
| 3603 sweeper().AddLatePage(p->owner()->identity(), p); | |
| 3604 } | |
| 3605 } | |
| 3606 newspace_evacuation_candidates_.Rewind(0); | |
| 3607 | |
| 3561 for (Page* p : evacuation_candidates_) { | 3608 for (Page* p : evacuation_candidates_) { |
| 3562 // Important: skip list should be cleared only after roots were updated | 3609 // Important: skip list should be cleared only after roots were updated |
| 3563 // because root iteration traverses the stack and might have to find | 3610 // because root iteration traverses the stack and might have to find |
| 3564 // code objects from non-updated pc pointing into evacuation candidate. | 3611 // code objects from non-updated pc pointing into evacuation candidate. |
| 3565 SkipList* list = p->skip_list(); | 3612 SkipList* list = p->skip_list(); |
| 3566 if (list != NULL) list->Clear(); | 3613 if (list != NULL) list->Clear(); |
| 3567 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { | 3614 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { |
| 3568 sweeper().AddLatePage(p->owner()->identity(), p); | 3615 sweeper().AddLatePage(p->owner()->identity(), p); |
| 3569 p->ClearFlag(Page::COMPACTION_WAS_ABORTED); | 3616 p->ClearFlag(Page::COMPACTION_WAS_ABORTED); |
| 3570 } | 3617 } |
| (...skipping 74 matching lines...) | |
| 3645 // Update the corresponding slot. | 3692 // Update the corresponding slot. |
| 3646 *slot = map_word.ToForwardingAddress(); | 3693 *slot = map_word.ToForwardingAddress(); |
| 3647 } | 3694 } |
| 3648 // If the object was in from space before and is after executing the | 3695 // If the object was in from space before and is after executing the |
| 3649 // callback in to space, the object is still live. | 3696 // callback in to space, the object is still live. |
| 3650 // Unfortunately, we do not know about the slot. It could be in a | 3697 // Unfortunately, we do not know about the slot. It could be in a |
| 3651 // just freed free space object. | 3698 // just freed free space object. |
| 3652 if (heap->InToSpace(*slot)) { | 3699 if (heap->InToSpace(*slot)) { |
| 3653 return KEEP_SLOT; | 3700 return KEEP_SLOT; |
| 3654 } | 3701 } |
| 3702 } else if (heap->InToSpace(*slot)) { | |
| 3703 // Slots can be in "to" space after a page has been moved. Since there is | |
| 3704 // no forwarding information present we need to check the markbits to | |
| 3705 // determine liveness. | |
| 3706 if (Marking::IsBlack( | |
| 3707 Marking::MarkBitFrom(reinterpret_cast<HeapObject*>(*slot)))) | |
| 3708 return KEEP_SLOT; | |
| 3655 } else { | 3709 } else { |
| 3656 DCHECK(!heap->InNewSpace(*slot)); | 3710 DCHECK(!heap->InNewSpace(*slot)); |
| 3657 } | 3711 } |
| 3658 return REMOVE_SLOT; | 3712 return REMOVE_SLOT; |
| 3659 } | 3713 } |
| 3660 }; | 3714 }; |
| 3661 | 3715 |
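
Reader note: the new `InToSpace` branch above exists because of new->new page moves. Objects on such pages were never copied, so no forwarding word was installed; the mark bit is the only remaining liveness signal for the pointed-to object. A stand-in sketch of the filtering rule:

```cpp
enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

struct HeapObject {
  bool in_from_space = false;
  bool in_to_space = false;
  bool marked_black = false;
};

SlotCallbackResult CheckSlot(HeapObject** slot) {
  HeapObject* target = *slot;
  if (target->in_from_space) {
    // Elided: follow the forwarding word and possibly keep the slot.
    return REMOVE_SLOT;
  }
  if (target->in_to_space) {
    // Page moved wholesale: no forwarding information, ask the mark bits.
    return target->marked_black ? KEEP_SLOT : REMOVE_SLOT;
  }
  return REMOVE_SLOT;
}

int main() {
  HeapObject o;
  o.in_to_space = o.marked_black = true;
  HeapObject* slot = &o;
  return CheckSlot(&slot) == KEEP_SLOT ? 0 : 1;
}
```
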
| 3662 int NumberOfPointerUpdateTasks(int pages) { | 3716 int NumberOfPointerUpdateTasks(int pages) { |
| 3663 if (!FLAG_parallel_pointer_update) return 1; | 3717 if (!FLAG_parallel_pointer_update) return 1; |
| 3664 const int kMaxTasks = 4; | 3718 const int kMaxTasks = 4; |
| (...skipping 12 matching lines...) | |
| 3677 job.Run(num_tasks, [](int i) { return 0; }); | 3731 job.Run(num_tasks, [](int i) { return 0; }); |
| 3678 } | 3732 } |
| 3679 | 3733 |
| 3680 class ToSpacePointerUpdateJobTraits { | 3734 class ToSpacePointerUpdateJobTraits { |
| 3681 public: | 3735 public: |
| 3682 typedef std::pair<Address, Address> PerPageData; | 3736 typedef std::pair<Address, Address> PerPageData; |
| 3683 typedef PointersUpdatingVisitor* PerTaskData; | 3737 typedef PointersUpdatingVisitor* PerTaskData; |
| 3684 | 3738 |
| 3685 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, | 3739 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, |
| 3686 MemoryChunk* chunk, PerPageData limits) { | 3740 MemoryChunk* chunk, PerPageData limits) { |
| 3741 if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { | |
| 3742 // New->new promoted pages contain garbage so they require iteration | |
| 3743 // using markbits. | |
| 3744 ProcessPageInParallelVisitLive(heap, visitor, chunk, limits); | |
| 3745 } else { | |
| 3746 ProcessPageInParallelVisitAll(heap, visitor, chunk, limits); | |
| 3747 } | |
| 3748 return true; | |
| 3749 } | |
| 3750 | |
| 3751 static const bool NeedSequentialFinalization = false; | |
| 3752 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { | |
| 3753 } | |
| 3754 | |
| 3755 private: | |
| 3756 static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor, | |
| 3757 MemoryChunk* chunk, | |
| 3758 PerPageData limits) { | |
| 3687 for (Address cur = limits.first; cur < limits.second;) { | 3759 for (Address cur = limits.first; cur < limits.second;) { |
| 3688 HeapObject* object = HeapObject::FromAddress(cur); | 3760 HeapObject* object = HeapObject::FromAddress(cur); |
| 3689 Map* map = object->map(); | 3761 Map* map = object->map(); |
| 3690 int size = object->SizeFromMap(map); | 3762 int size = object->SizeFromMap(map); |
| 3691 object->IterateBody(map->instance_type(), size, visitor); | 3763 object->IterateBody(map->instance_type(), size, visitor); |
| 3692 cur += size; | 3764 cur += size; |
| 3693 } | 3765 } |
| 3694 return true; | |
| 3695 } | 3766 } |
| 3696 static const bool NeedSequentialFinalization = false; | 3767 |
| 3697 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { | 3768 static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor, |
| 3769 MemoryChunk* chunk, | |
| 3770 PerPageData limits) { | |
| 3771 LiveObjectIterator<kBlackObjects> it(chunk); | |
| 3772 HeapObject* object = NULL; | |
| 3773 while ((object = it.Next()) != NULL) { | |
| 3774 Map* map = object->map(); | |
| 3775 int size = object->SizeFromMap(map); | |
| 3776 object->IterateBody(map->instance_type(), size, visitor); | |
| 3777 } | |
| 3698 } | 3778 } |
| 3699 }; | 3779 }; |
| 3700 | 3780 |
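
Reader note: the traits class now picks between two iteration strategies. An ordinary to-space page is a dense run of objects and can be walked linearly; a new->new promoted page still contains dead objects, so only black-marked objects may be visited. A stand-in sketch of the distinction:

```cpp
#include <vector>

struct Object { int size; bool black; };

struct Page {
  bool new_new_promotion = false;
  std::vector<Object> objects;  // stand-in for the page's linear payload
};

template <typename Visitor>
int ProcessPage(const Page& page, Visitor visit) {
  int visited = 0;
  for (const Object& o : page.objects) {
    // Markbit-based iteration skips garbage; linear iteration sees all.
    if (page.new_new_promotion && !o.black) continue;
    visit(o);
    visited++;
  }
  return visited;
}

int main() {
  Page p;
  p.new_new_promotion = true;
  p.objects = {{16, true}, {32, false}};
  return ProcessPage(p, [](const Object&) {}) == 1 ? 0 : 1;
}
```
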
| 3701 void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) { | 3781 void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) { |
| 3702 PageParallelJob<ToSpacePointerUpdateJobTraits> job( | 3782 PageParallelJob<ToSpacePointerUpdateJobTraits> job( |
| 3703 heap, heap->isolate()->cancelable_task_manager(), semaphore); | 3783 heap, heap->isolate()->cancelable_task_manager(), semaphore); |
| 3704 Address space_start = heap->new_space()->bottom(); | 3784 Address space_start = heap->new_space()->bottom(); |
| 3705 Address space_end = heap->new_space()->top(); | 3785 Address space_end = heap->new_space()->top(); |
| 3706 NewSpacePageIterator it(space_start, space_end); | 3786 NewSpacePageIterator it(space_start, space_end); |
| 3707 while (it.has_next()) { | 3787 while (it.has_next()) { |
| (...skipping 55 matching lines...) | |
| 3763 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); | 3843 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); |
| 3764 } | 3844 } |
| 3765 | 3845 |
| 3766 int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity, | 3846 int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity, |
| 3767 int required_freed_bytes, | 3847 int required_freed_bytes, |
| 3768 int max_pages) { | 3848 int max_pages) { |
| 3769 int max_freed = 0; | 3849 int max_freed = 0; |
| 3770 int pages_freed = 0; | 3850 int pages_freed = 0; |
| 3771 Page* page = nullptr; | 3851 Page* page = nullptr; |
| 3772 while ((page = GetSweepingPageSafe(identity)) != nullptr) { | 3852 while ((page = GetSweepingPageSafe(identity)) != nullptr) { |
| 3773 int freed = ParallelSweepPage(page, heap_->paged_space(identity)); | 3853 int freed = ParallelSweepPage(page, identity); |
| 3774 pages_freed += 1; | 3854 pages_freed += 1; |
| 3775 DCHECK_GE(freed, 0); | 3855 DCHECK_GE(freed, 0); |
| 3776 max_freed = Max(max_freed, freed); | 3856 max_freed = Max(max_freed, freed); |
| 3777 if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes)) | 3857 if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes)) |
| 3778 return max_freed; | 3858 return max_freed; |
| 3779 if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed; | 3859 if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed; |
| 3780 } | 3860 } |
| 3781 return max_freed; | 3861 return max_freed; |
| 3782 } | 3862 } |
| 3783 | 3863 |
| 3784 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page, | 3864 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page, |
| 3785 PagedSpace* space) { | 3865 AllocationSpace identity) { |
| 3786 int max_freed = 0; | 3866 int max_freed = 0; |
| 3787 if (page->mutex()->TryLock()) { | 3867 if (page->mutex()->TryLock()) { |
| 3788 // If this page was already swept in the meantime, we can return here. | 3868 // If this page was already swept in the meantime, we can return here. |
| 3789 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) { | 3869 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) { |
| 3790 page->mutex()->Unlock(); | 3870 page->mutex()->Unlock(); |
| 3791 return 0; | 3871 return 0; |
| 3792 } | 3872 } |
| 3793 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); | 3873 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); |
| 3794 if (space->identity() == OLD_SPACE) { | 3874 if (identity == NEW_SPACE) { |
| 3875 RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, | |
| 3876 IGNORE_FREE_LIST, IGNORE_FREE_SPACE>(nullptr, page, nullptr); | |
| 3877 } else if (identity == OLD_SPACE) { | |
| 3795 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, | 3878 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, |
| 3796 IGNORE_FREE_SPACE>(space, page, NULL); | 3879 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>( |
| 3797 } else if (space->identity() == CODE_SPACE) { | 3880 heap_->paged_space(identity), page, nullptr); |
| 3881 } else if (identity == CODE_SPACE) { | |
| 3798 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST, | 3882 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST, |
| 3799 IGNORE_FREE_SPACE>(space, page, NULL); | 3883 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>( |
| 3884 heap_->paged_space(identity), page, nullptr); | |
| 3800 } else { | 3885 } else { |
| 3801 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, | 3886 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, |
| 3802 IGNORE_FREE_SPACE>(space, page, NULL); | 3887 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>( |
| 3888 heap_->paged_space(identity), page, nullptr); | |
| 3803 } | 3889 } |
| 3804 { | 3890 { |
| 3805 base::LockGuard<base::Mutex> guard(&mutex_); | 3891 base::LockGuard<base::Mutex> guard(&mutex_); |
| 3806 swept_list_[space->identity()].Add(page); | 3892 swept_list_[identity].Add(page); |
| 3807 } | 3893 } |
| 3808 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone); | 3894 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone); |
| 3809 page->mutex()->Unlock(); | 3895 page->mutex()->Unlock(); |
| 3810 } | 3896 } |
| 3811 return max_freed; | 3897 return max_freed; |
| 3812 } | 3898 } |
| 3813 | 3899 |
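
Reader note: `ParallelSweepPage` now keys off the `AllocationSpace` identity (passing `nullptr` as the space for NEW_SPACE) and relies on a try-lock plus a double-check so racing sweepers cannot sweep a page twice. A reduced sketch of that protocol with stand-in types:

```cpp
#include <atomic>
#include <mutex>

enum SweepingState { kSweepingPending, kSweepingInProgress, kSweepingDone };

struct Page {
  std::mutex mutex;
  std::atomic<SweepingState> state{kSweepingPending};
};

int ParallelSweepPage(Page* page) {
  int max_freed = 0;
  if (page->mutex.try_lock()) {
    // Double-check under the lock: someone may have swept it meanwhile.
    if (page->state.load() != kSweepingPending) {
      page->mutex.unlock();
      return 0;
    }
    page->state.store(kSweepingInProgress);
    max_freed = 128;  // placeholder for RawSweep's return value
    page->state.store(kSweepingDone);
    page->mutex.unlock();
  }
  return max_freed;
}

int main() {
  Page p;
  return ParallelSweepPage(&p) == 128 ? 0 : 1;
}
```
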
| 3814 void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) { | 3900 void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) { |
| 3815 DCHECK(!sweeping_in_progress_); | 3901 DCHECK(!sweeping_in_progress_); |
| 3816 PrepareToBeSweptPage(space, page); | 3902 PrepareToBeSweptPage(space, page); |
| 3817 sweeping_list_[space].push_back(page); | 3903 sweeping_list_[space].push_back(page); |
| 3818 } | 3904 } |
| 3819 | 3905 |
| 3820 void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space, | 3906 void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space, |
| 3821 Page* page) { | 3907 Page* page) { |
| 3822 DCHECK(sweeping_in_progress_); | 3908 DCHECK(sweeping_in_progress_); |
| 3823 PrepareToBeSweptPage(space, page); | 3909 PrepareToBeSweptPage(space, page); |
| 3824 late_pages_ = true; | 3910 late_pages_ = true; |
| 3825 AddSweepingPageSafe(space, page); | 3911 AddSweepingPageSafe(space, page); |
| 3826 } | 3912 } |
| 3827 | 3913 |
| 3828 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space, | 3914 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space, |
| 3829 Page* page) { | 3915 Page* page) { |
| 3830 page->concurrent_sweeping_state().SetValue(Page::kSweepingPending); | 3916 page->concurrent_sweeping_state().SetValue(Page::kSweepingPending); |
| 3831 int to_sweep = page->area_size() - page->LiveBytes(); | 3917 int to_sweep = page->area_size() - page->LiveBytes(); |
| 3832 heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep); | 3918 if (space != NEW_SPACE) |
| 3919 heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep); | |
| 3833 } | 3920 } |
| 3834 | 3921 |
| 3835 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe( | 3922 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe( |
| 3836 AllocationSpace space) { | 3923 AllocationSpace space) { |
| 3837 base::LockGuard<base::Mutex> guard(&mutex_); | 3924 base::LockGuard<base::Mutex> guard(&mutex_); |
| 3838 Page* page = nullptr; | 3925 Page* page = nullptr; |
| 3839 if (!sweeping_list_[space].empty()) { | 3926 if (!sweeping_list_[space].empty()) { |
| 3840 page = sweeping_list_[space].front(); | 3927 page = sweeping_list_[space].front(); |
| 3841 sweeping_list_[space].pop_front(); | 3928 sweeping_list_[space].pop_front(); |
| 3842 } | 3929 } |
| (...skipping 43 matching lines...) | |
| 3886 continue; | 3973 continue; |
| 3887 } | 3974 } |
| 3888 | 3975 |
| 3889 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { | 3976 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { |
| 3890 // We need to sweep the page to get it into an iterable state again. Note | 3977 // We need to sweep the page to get it into an iterable state again. Note |
| 3891 // that this adds unusable memory into the free list that is later on | 3978 // that this adds unusable memory into the free list that is later on |
| 3892 // (in the free list) dropped again. Since we only use the flag for | 3979 // (in the free list) dropped again. Since we only use the flag for |
| 3893 // testing this is fine. | 3980 // testing this is fine. |
| 3894 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); | 3981 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); |
| 3895 Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, | 3982 Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, |
| 3896 Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>( | 3983 Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_LIST, |
| 3897 space, p, nullptr); | 3984 Sweeper::IGNORE_FREE_SPACE>(space, p, nullptr); |
| 3898 continue; | 3985 continue; |
| 3899 } | 3986 } |
| 3900 | 3987 |
| 3901 // One unused page is kept, all further are released before sweeping them. | 3988 // One unused page is kept, all further are released before sweeping them. |
| 3902 if (p->LiveBytes() == 0) { | 3989 if (p->LiveBytes() == 0) { |
| 3903 if (unused_page_present) { | 3990 if (unused_page_present) { |
| 3904 if (FLAG_gc_verbose) { | 3991 if (FLAG_gc_verbose) { |
| 3905 PrintIsolate(isolate(), "sweeping: released page: %p", | 3992 PrintIsolate(isolate(), "sweeping: released page: %p", |
| 3906 static_cast<void*>(p)); | 3993 static_cast<void*>(p)); |
| 3907 } | 3994 } |
| (...skipping 85 matching lines...) | |
| 3993 MarkBit mark_bit = Marking::MarkBitFrom(host); | 4080 MarkBit mark_bit = Marking::MarkBitFrom(host); |
| 3994 if (Marking::IsBlack(mark_bit)) { | 4081 if (Marking::IsBlack(mark_bit)) { |
| 3995 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); | 4082 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); |
| 3996 RecordRelocSlot(host, &rinfo, target); | 4083 RecordRelocSlot(host, &rinfo, target); |
| 3997 } | 4084 } |
| 3998 } | 4085 } |
| 3999 } | 4086 } |
| 4000 | 4087 |
| 4001 } // namespace internal | 4088 } // namespace internal |
| 4002 } // namespace v8 | 4089 } // namespace v8 |