OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 567 matching lines...)
578 bool MarkCompactCollector::AreSweeperThreadsActivated() { | 578 bool MarkCompactCollector::AreSweeperThreadsActivated() { |
579 return heap()->isolate()->sweeper_threads() != NULL; | 579 return heap()->isolate()->sweeper_threads() != NULL; |
580 } | 580 } |
581 | 581 |
582 | 582 |
583 bool MarkCompactCollector::IsConcurrentSweepingInProgress() { | 583 bool MarkCompactCollector::IsConcurrentSweepingInProgress() { |
584 return sweeping_pending_; | 584 return sweeping_pending_; |
585 } | 585 } |
586 | 586 |
587 | 587 |
588 void MarkCompactCollector::FinalizeSweeping() { | |
589 ASSERT(sweeping_pending_ == false); | |
590 ReleaseEvacuationCandidates(); | |
591 heap()->FreeQueuedChunks(); | |
592 } | |
593 | |
594 | |
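Note on this hunk: FinalizeSweeping() is deleted outright. Its two jobs, releasing the evacuation candidates and freeing the queued chunks, move into ReleaseEvacuationCandidates() itself (see the hunk around new line 3317), so the call sites in Prepare() and SweepSpaces() no longer need a separate finalization step. A minimal sketch of the shape of the change, with stand-in free functions instead of the real MarkCompactCollector members:

// Hypothetical stand-ins; names mirror the V8 methods but nothing here
// is the real implementation.
#include <cstdio>

namespace old_shape {
void ReleaseEvacuationCandidates() { std::puts("release candidates"); }
void FreeQueuedChunks() { std::puts("free queued chunks"); }

// Callers had to remember to pair sweeping with this extra step.
void FinalizeSweeping() {
  ReleaseEvacuationCandidates();
  FreeQueuedChunks();
}
}  // namespace old_shape

namespace new_shape {
void FreeQueuedChunks() { std::puts("free queued chunks"); }

// Releasing the candidates now frees the queued chunks as its last step,
// so there is no separate finalization call to forget.
void ReleaseEvacuationCandidates() {
  std::puts("release candidates");
  FreeQueuedChunks();
}
}  // namespace new_shape

int main() {
  old_shape::FinalizeSweeping();             // old call sequence
  new_shape::ReleaseEvacuationCandidates();  // new call sequence
}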
595 void MarkCompactCollector::MarkInParallel() { | 588 void MarkCompactCollector::MarkInParallel() { |
596 for (int i = 0; i < FLAG_marking_threads; i++) { | 589 for (int i = 0; i < FLAG_marking_threads; i++) { |
597 heap()->isolate()->marking_threads()[i]->StartMarking(); | 590 heap()->isolate()->marking_threads()[i]->StartMarking(); |
598 } | 591 } |
599 } | 592 } |
600 | 593 |
601 | 594 |
602 void MarkCompactCollector::WaitUntilMarkingCompleted() { | 595 void MarkCompactCollector::WaitUntilMarkingCompleted() { |
603 for (int i = 0; i < FLAG_marking_threads; i++) { | 596 for (int i = 0; i < FLAG_marking_threads; i++) { |
604 heap()->isolate()->marking_threads()[i]->WaitForMarkingThread(); | 597 heap()->isolate()->marking_threads()[i]->WaitForMarkingThread(); |
(...skipping 299 matching lines...)
904 #ifdef DEBUG | 897 #ifdef DEBUG |
905 ASSERT(state_ == IDLE); | 898 ASSERT(state_ == IDLE); |
906 state_ = PREPARE_GC; | 899 state_ = PREPARE_GC; |
907 #endif | 900 #endif |
908 | 901 |
909 ASSERT(!FLAG_never_compact || !FLAG_always_compact); | 902 ASSERT(!FLAG_never_compact || !FLAG_always_compact); |
910 | 903 |
911 if (IsConcurrentSweepingInProgress()) { | 904 if (IsConcurrentSweepingInProgress()) { |
912 // Instead of waiting we could also abort the sweeper threads here. | 905 // Instead of waiting we could also abort the sweeper threads here. |
913 WaitUntilSweepingCompleted(); | 906 WaitUntilSweepingCompleted(); |
914 FinalizeSweeping(); | |
915 } | 907 } |
916 | 908 |
917 // Clear marking bits if incremental marking is aborted. | 909 // Clear marking bits if incremental marking is aborted. |
918 if (was_marked_incrementally_ && abort_incremental_marking_) { | 910 if (was_marked_incrementally_ && abort_incremental_marking_) { |
919 heap()->incremental_marking()->Abort(); | 911 heap()->incremental_marking()->Abort(); |
920 ClearMarkbits(); | 912 ClearMarkbits(); |
921 AbortCompaction(); | 913 AbortCompaction(); |
922 was_marked_incrementally_ = false; | 914 was_marked_incrementally_ = false; |
923 } | 915 } |
924 | 916 |
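On the Prepare() hunk above: with FinalizeSweeping() gone, a collection that starts while concurrent sweeping is still pending only has to wait for the sweeper threads; releasing the candidates is no longer Prepare()'s business (it happens at the end of SweepSpaces()). A minimal sketch of the guard pattern, with an atomic flag standing in for the real sweeper-thread signalling:

// Sketch only; IsConcurrentSweepingInProgress() and
// WaitUntilSweepingCompleted() stand in for the collector's methods.
#include <atomic>

std::atomic<bool> sweeping_pending{false};

bool IsConcurrentSweepingInProgress() { return sweeping_pending.load(); }

void WaitUntilSweepingCompleted() {
  // The real code blocks on the sweeper threads; a spin-wait models it.
  while (sweeping_pending.load()) { /* wait */ }
}

void PrepareForNextGC() {
  if (IsConcurrentSweepingInProgress()) {
    // As the original comment notes, aborting the sweeper threads would
    // also be an option; waiting is the simpler, conservative choice.
    WaitUntilSweepingCompleted();
  }
  // ... proceed with marking; no separate FinalizeSweeping() step anymore.
}

int main() { PrepareForNextGC(); }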
(...skipping 1917 matching lines...)
2842 if (static_cast<PagedSpace*>(p->owner())->CanExpand()) { | 2834 if (static_cast<PagedSpace*>(p->owner())->CanExpand()) { |
2843 EvacuateLiveObjectsFromPage(p); | 2835 EvacuateLiveObjectsFromPage(p); |
2844 } else { | 2836 } else { |
2845 // Without room for expansion evacuation is not guaranteed to succeed. | 2837 // Without room for expansion evacuation is not guaranteed to succeed. |
2846 // Pessimistically abandon unevacuated pages. | 2838 // Pessimistically abandon unevacuated pages. |
2847 for (int j = i; j < npages; j++) { | 2839 for (int j = i; j < npages; j++) { |
2848 Page* page = evacuation_candidates_[j]; | 2840 Page* page = evacuation_candidates_[j]; |
2849 slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address()); | 2841 slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address()); |
2850 page->ClearEvacuationCandidate(); | 2842 page->ClearEvacuationCandidate(); |
2851 page->SetFlag(Page::RESCAN_ON_EVACUATION); | 2843 page->SetFlag(Page::RESCAN_ON_EVACUATION); |
| 2844 page->InsertAfter(static_cast<PagedSpace*>(page->owner())->anchor()); |
2852 } | 2845 } |
2853 return; | 2846 return; |
2854 } | 2847 } |
2855 } | 2848 } |
2856 } | 2849 } |
2857 } | 2850 } |
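The added new line 2844 is the interesting part of this hunk: candidate pages are unlinked from their space's page list before sweeping starts (see the new UnlinkEvacuationCandidates() below), so a page that is abandoned rather than evacuated has to be linked back in behind the space's anchor page, or the space would lose track of it entirely. A toy model of the circular doubly-linked page list with an anchor sentinel; the names are illustrative, not the real Page interface:

// Toy model of a circular doubly-linked list rooted at an anchor sentinel.
#include <cassert>

struct PageNode {
  PageNode* prev = this;
  PageNode* next = this;

  void Unlink() {            // drop this node out of its list
    prev->next = next;
    next->prev = prev;
    prev = next = this;
  }

  void InsertAfter(PageNode* other) {  // link this node in behind `other`
    Unlink();                          // in case it is already linked
    next = other->next;
    prev = other;
    other->next->prev = this;
    other->next = this;
  }
};

int main() {
  PageNode anchor, p;
  p.InsertAfter(&anchor);        // page visible to anyone walking the list
  assert(anchor.next == &p);
  p.Unlink();                    // page invisible: sweepers cannot see it
  assert(anchor.next == &anchor);
  p.InsertAfter(&anchor);        // abandoned candidate re-enters the list
  assert(anchor.next == &p && p.next == &anchor);
}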
2858 | 2851 |
2859 | 2852 |
2860 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { | 2853 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { |
2861 public: | 2854 public: |
(...skipping 440 matching lines...)
3302 if (FLAG_verify_heap) { | 3295 if (FLAG_verify_heap) { |
3303 VerifyEvacuation(heap_); | 3296 VerifyEvacuation(heap_); |
3304 } | 3297 } |
3305 #endif | 3298 #endif |
3306 | 3299 |
3307 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_); | 3300 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_); |
3308 ASSERT(migration_slots_buffer_ == NULL); | 3301 ASSERT(migration_slots_buffer_ == NULL); |
3309 } | 3302 } |
3310 | 3303 |
3311 | 3304 |
| 3305 void MarkCompactCollector::UnlinkEvacuationCandidates() { |
| 3306 int npages = evacuation_candidates_.length(); |
| 3307 for (int i = 0; i < npages; i++) { |
| 3308 Page* p = evacuation_candidates_[i]; |
| 3309 if (!p->IsEvacuationCandidate()) continue; |
| 3310 p->Unlink(); |
| 3311 p->ClearSweptPrecisely(); |
| 3312 p->ClearSweptConservatively(); |
| 3313 } |
| 3314 } |
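This new helper takes the candidate pages out of their spaces' page lists before the sweeper threads are started (it is called from SweepSpaces(), see below), so a concurrent sweeper walking a space's list can never reach a page the main thread is about to evacuate and free. The ClearSweptPrecisely()/ClearSweptConservatively() calls presumably reset the swept bits so that a page which later gets re-linked is treated as unswept. The memory itself is still released later, single-threaded, in ReleaseEvacuationCandidates(). A simplified, hypothetical sketch of the ordering invariant (the real code keeps the candidates in evacuation_candidates_ throughout; the two lists here just make the reachability point explicit):

// Sketch of the invariant: candidates leave the shared page list *before*
// sweepers start walking it. Not the real collector.
#include <cstdio>
#include <list>

struct Page { bool evacuation_candidate; };

std::list<Page*> space_pages;  // the list sweeper threads iterate
std::list<Page*> candidates;   // touched by the main thread only

void UnlinkEvacuationCandidates() {
  for (auto it = space_pages.begin(); it != space_pages.end();) {
    if ((*it)->evacuation_candidate) {
      candidates.push_back(*it);
      it = space_pages.erase(it);  // sweepers can no longer reach this page
    } else {
      ++it;
    }
  }
}

void StartSweeperThreads() {
  // Sweepers only ever see space_pages; every page they visit is safe.
  for (Page* p : space_pages) std::printf("sweep %p\n", (void*)p);
}

int main() {
  Page a{false}, b{true};
  space_pages = {&a, &b};
  UnlinkEvacuationCandidates();  // must happen first...
  StartSweeperThreads();         // ...then sweeping may run in parallel
}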
| 3315 |
| 3316 |
3312 void MarkCompactCollector::ReleaseEvacuationCandidates() { | 3317 void MarkCompactCollector::ReleaseEvacuationCandidates() { |
3313 int npages = evacuation_candidates_.length(); | 3318 int npages = evacuation_candidates_.length(); |
3314 for (int i = 0; i < npages; i++) { | 3319 for (int i = 0; i < npages; i++) { |
3315 Page* p = evacuation_candidates_[i]; | 3320 Page* p = evacuation_candidates_[i]; |
3316 if (!p->IsEvacuationCandidate()) continue; | 3321 if (!p->IsEvacuationCandidate()) continue; |
3317 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3322 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
3318 space->Free(p->area_start(), p->area_size()); | 3323 space->Free(p->area_start(), p->area_size()); |
3319 p->set_scan_on_scavenge(false); | 3324 p->set_scan_on_scavenge(false); |
3320 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); | 3325 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); |
3321 p->ResetLiveBytes(); | 3326 p->ResetLiveBytes(); |
3322 space->ReleasePage(p); | 3327 space->ReleasePage(p, false); |
3323 } | 3328 } |
3324 evacuation_candidates_.Rewind(0); | 3329 evacuation_candidates_.Rewind(0); |
3325 compacting_ = false; | 3330 compacting_ = false; |
| 3331 heap()->FreeQueuedChunks(); |
3326 } | 3332 } |
3327 | 3333 |
3328 | 3334 |
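Two changes land in the ReleaseEvacuationCandidates() hunk above. First, ReleasePage() grows a second argument; judging from the two call sites in this CL, it appears to say whether the page still has to be unlinked from the space's page list (false here, because UnlinkEvacuationCandidates() already did that; true at the unused-page call in SweepSpace below). Second, heap()->FreeQueuedChunks() moves here from the deleted FinalizeSweeping(). A hypothetical sketch of the presumed ReleasePage() contract; the real PagedSpace differs:

#include <cstdio>

struct Page {
  bool linked = true;
  void Unlink() { linked = false; }
};

struct PagedSpace {
  // `unlink` says whether the page is still in the space's page list and
  // must be taken out before its memory is handed back.
  void ReleasePage(Page* page, bool unlink) {
    if (unlink) page->Unlink();  // e.g. the unused-page path in SweepSpace
    // ... return the page's memory to the allocator ...
    std::printf("released page (was %slinked)\n", unlink ? "" : "un");
  }
};

int main() {
  PagedSpace space;
  Page evacuated;  // already unlinked by UnlinkEvacuationCandidates()
  evacuated.Unlink();
  space.ReleasePage(&evacuated, false);
  Page unused;     // still linked when SweepSpace finds it empty
  space.ReleasePage(&unused, true);
}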
3329 static const int kStartTableEntriesPerLine = 5; | 3335 static const int kStartTableEntriesPerLine = 5; |
3330 static const int kStartTableLines = 171; | 3336 static const int kStartTableLines = 171; |
3331 static const int kStartTableInvalidLine = 127; | 3337 static const int kStartTableInvalidLine = 127; |
3332 static const int kStartTableUnusedEntry = 126; | 3338 static const int kStartTableUnusedEntry = 126; |
3333 | 3339 |
3334 #define _ kStartTableUnusedEntry | 3340 #define _ kStartTableUnusedEntry |
3335 #define X kStartTableInvalidLine | 3341 #define X kStartTableInvalidLine |
(...skipping 451 matching lines...)
3787 // One unused page is kept, all further are released before sweeping them. | 3793 // One unused page is kept, all further are released before sweeping them. |
3788 if (p->LiveBytes() == 0) { | 3794 if (p->LiveBytes() == 0) { |
3789 if (unused_page_present) { | 3795 if (unused_page_present) { |
3790 if (FLAG_gc_verbose) { | 3796 if (FLAG_gc_verbose) { |
3791 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n", | 3797 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n", |
3792 reinterpret_cast<intptr_t>(p)); | 3798 reinterpret_cast<intptr_t>(p)); |
3793 } | 3799 } |
3794 // Adjust unswept free bytes because releasing a page expects said | 3800 // Adjust unswept free bytes because releasing a page expects said |
3795 // counter to be accurate for unswept pages. | 3801 // counter to be accurate for unswept pages. |
3796 space->IncreaseUnsweptFreeBytes(p); | 3802 space->IncreaseUnsweptFreeBytes(p); |
3797 space->ReleasePage(p); | 3803 space->ReleasePage(p, true); |
3798 continue; | 3804 continue; |
3799 } | 3805 } |
3800 unused_page_present = true; | 3806 unused_page_present = true; |
3801 } | 3807 } |
3802 | 3808 |
3803 switch (sweeper) { | 3809 switch (sweeper) { |
3804 case CONSERVATIVE: { | 3810 case CONSERVATIVE: { |
3805 if (FLAG_gc_verbose) { | 3811 if (FLAG_gc_verbose) { |
3806 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", | 3812 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", |
3807 reinterpret_cast<intptr_t>(p)); | 3813 reinterpret_cast<intptr_t>(p)); |
(...skipping 84 matching lines...)
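Context for the ReleasePage(p, true) call in the hunk above: SweepSpace() keeps exactly one fully-empty page per space as slack for future allocation and releases every further empty page; those pages are still linked into the space's list, hence the true. A standalone sketch of the keep-one-empty-page heuristic, detached from the V8 types:

#include <cstdio>
#include <vector>

struct Page { int live_bytes; };

void SweepSpace(std::vector<Page*>& pages) {
  bool unused_page_present = false;
  for (auto it = pages.begin(); it != pages.end();) {
    Page* p = *it;
    if (p->live_bytes == 0) {
      if (unused_page_present) {    // second, third, ... empty page:
        std::printf("release %p\n", (void*)p);
        it = pages.erase(it);       // give the memory back
        continue;
      }
      unused_page_present = true;   // first empty page is kept as slack
    }
    // ... sweep p conservatively or precisely ...
    ++it;
  }
}

int main() {
  Page a{0}, b{64}, c{0};
  std::vector<Page*> pages = {&a, &b, &c};
  SweepSpace(pages);  // keeps `a`, sweeps `b`, releases `c`
}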
3892 if (sweep_precisely_) how_to_sweep = PRECISE; | 3898 if (sweep_precisely_) how_to_sweep = PRECISE; |
3893 // Noncompacting collections simply sweep the spaces to clear the mark | 3899 // Noncompacting collections simply sweep the spaces to clear the mark |
3894 // bits and free the nonlive blocks (for old and map spaces). We sweep | 3900 // bits and free the nonlive blocks (for old and map spaces). We sweep |
3895 // the map space last because freeing non-live maps overwrites them and | 3901 // the map space last because freeing non-live maps overwrites them and |
3896 // the other spaces rely on possibly non-live maps to get the sizes for | 3902 // the other spaces rely on possibly non-live maps to get the sizes for |
3897 // non-live objects. | 3903 // non-live objects. |
3898 SequentialSweepingScope scope(this); | 3904 SequentialSweepingScope scope(this); |
3899 SweepSpace(heap()->old_pointer_space(), how_to_sweep); | 3905 SweepSpace(heap()->old_pointer_space(), how_to_sweep); |
3900 SweepSpace(heap()->old_data_space(), how_to_sweep); | 3906 SweepSpace(heap()->old_data_space(), how_to_sweep); |
3901 | 3907 |
| 3908 // Unlink evacuation candidates before sweeper threads access the list of |
| 3909 // pages to avoid race condition. |
| 3910 UnlinkEvacuationCandidates(); |
| 3911 |
3902 if (how_to_sweep == PARALLEL_CONSERVATIVE || | 3912 if (how_to_sweep == PARALLEL_CONSERVATIVE || |
3903 how_to_sweep == CONCURRENT_CONSERVATIVE) { | 3913 how_to_sweep == CONCURRENT_CONSERVATIVE) { |
3904 // TODO(hpayer): fix race with concurrent sweeper | 3914 // TODO(hpayer): fix race with concurrent sweeper |
3905 StartSweeperThreads(); | 3915 StartSweeperThreads(); |
3906 } | 3916 } |
3907 | 3917 |
3908 if (how_to_sweep == PARALLEL_CONSERVATIVE) { | 3918 if (how_to_sweep == PARALLEL_CONSERVATIVE) { |
3909 WaitUntilSweepingCompleted(); | 3919 WaitUntilSweepingCompleted(); |
3910 } | 3920 } |
3911 | 3921 |
3912 RemoveDeadInvalidatedCode(); | 3922 RemoveDeadInvalidatedCode(); |
3913 SweepSpace(heap()->code_space(), PRECISE); | 3923 SweepSpace(heap()->code_space(), PRECISE); |
3914 | 3924 |
3915 SweepSpace(heap()->cell_space(), PRECISE); | 3925 SweepSpace(heap()->cell_space(), PRECISE); |
3916 | 3926 |
3917 EvacuateNewSpaceAndCandidates(); | 3927 EvacuateNewSpaceAndCandidates(); |
3918 | 3928 |
3919 // ClearNonLiveTransitions depends on precise sweeping of map space to | 3929 // ClearNonLiveTransitions depends on precise sweeping of map space to |
3920 // detect whether unmarked map became dead in this collection or in one | 3930 // detect whether unmarked map became dead in this collection or in one |
3921 // of the previous ones. | 3931 // of the previous ones. |
3922 SweepSpace(heap()->map_space(), PRECISE); | 3932 SweepSpace(heap()->map_space(), PRECISE); |
3923 | 3933 |
3924 // Deallocate unmarked objects and clear marked bits for marked objects. | 3934 // Deallocate unmarked objects and clear marked bits for marked objects. |
3925 heap_->lo_space()->FreeUnmarkedObjects(); | 3935 heap_->lo_space()->FreeUnmarkedObjects(); |
3926 | 3936 |
3927 if (how_to_sweep != CONCURRENT_CONSERVATIVE) { | 3937 ReleaseEvacuationCandidates(); |
3928 FinalizeSweeping(); | |
3929 } | |
3930 } | 3938 } |
3931 | 3939 |
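The net ordering after this hunk: the old pointer and old data spaces are swept first, candidates are unlinked before any sweeper thread starts, PARALLEL_CONSERVATIVE still blocks until sweeping finishes while CONCURRENT_CONSERVATIVE keeps running past the pause, the map space is deliberately swept last (the other spaces read object sizes through possibly dead maps, per the comment above), and ReleaseEvacuationCandidates() now runs unconditionally instead of the mode-dependent FinalizeSweeping(). That unconditional release is presumably safe precisely because the candidates were unlinked: concurrent sweepers can no longer reach them. A compressed skeleton of the sequence, with stub callees:

// Skeleton of the new SweepSpaces() ordering; all callees are stubs.
#include <cstdio>

enum SweeperType { CONCURRENT_CONSERVATIVE, PARALLEL_CONSERVATIVE, PRECISE };

void Sweep(const char* space) { std::printf("sweep %s\n", space); }
void UnlinkEvacuationCandidates() { std::puts("unlink candidates"); }
void StartSweeperThreads() { std::puts("start sweepers"); }
void WaitUntilSweepingCompleted() { std::puts("wait for sweepers"); }
void EvacuateNewSpaceAndCandidates() { std::puts("evacuate"); }
void ReleaseEvacuationCandidates() { std::puts("release candidates"); }

void SweepSpaces(SweeperType how) {
  Sweep("old_pointer");
  Sweep("old_data");
  UnlinkEvacuationCandidates();      // before sweepers can touch the lists
  if (how != PRECISE) StartSweeperThreads();
  if (how == PARALLEL_CONSERVATIVE) WaitUntilSweepingCompleted();
  Sweep("code");
  Sweep("cell");
  EvacuateNewSpaceAndCandidates();
  Sweep("map");                      // last: others read sizes via dead maps
  // Unconditional now: candidates are unreachable from the page lists,
  // so releasing them is safe even while concurrent sweeping continues.
  ReleaseEvacuationCandidates();
}

int main() { SweepSpaces(CONCURRENT_CONSERVATIVE); }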
3932 | 3940 |
3933 void MarkCompactCollector::EnableCodeFlushing(bool enable) { | 3941 void MarkCompactCollector::EnableCodeFlushing(bool enable) { |
3934 #ifdef ENABLE_DEBUGGER_SUPPORT | 3942 #ifdef ENABLE_DEBUGGER_SUPPORT |
3935 if (heap()->isolate()->debug()->IsLoaded() || | 3943 if (heap()->isolate()->debug()->IsLoaded() || |
3936 heap()->isolate()->debug()->has_break_points()) { | 3944 heap()->isolate()->debug()->has_break_points()) { |
3937 enable = false; | 3945 enable = false; |
3938 } | 3946 } |
3939 #endif | 3947 #endif |
(...skipping 180 matching lines...)
4120 while (buffer != NULL) { | 4128 while (buffer != NULL) { |
4121 SlotsBuffer* next_buffer = buffer->next(); | 4129 SlotsBuffer* next_buffer = buffer->next(); |
4122 DeallocateBuffer(buffer); | 4130 DeallocateBuffer(buffer); |
4123 buffer = next_buffer; | 4131 buffer = next_buffer; |
4124 } | 4132 } |
4125 *buffer_address = NULL; | 4133 *buffer_address = NULL; |
4126 } | 4134 } |
4127 | 4135 |
4128 | 4136 |
4129 } } // namespace v8::internal | 4137 } } // namespace v8::internal |