OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 630 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
641 CHECK(p->slots_buffer() == NULL); | 641 CHECK(p->slots_buffer() == NULL); |
642 DCHECK(p->area_size() == area_size); | 642 DCHECK(p->area_size() == area_size); |
643 int live_bytes = | 643 int live_bytes = |
644 p->WasSwept() ? p->LiveBytesFromFreeList() : p->LiveBytes(); | 644 p->WasSwept() ? p->LiveBytesFromFreeList() : p->LiveBytes(); |
645 pages.push_back(std::make_pair(live_bytes, p)); | 645 pages.push_back(std::make_pair(live_bytes, p)); |
646 } | 646 } |
647 | 647 |
648 int candidate_count = 0; | 648 int candidate_count = 0; |
649 int total_live_bytes = 0; | 649 int total_live_bytes = 0; |
650 | 650 |
651 bool reduce_memory = heap()->ShouldReduceMemory(); | 651 const bool reduce_memory = heap()->ShouldReduceMemory(); |
652 if (FLAG_manual_evacuation_candidates_selection) { | 652 if (FLAG_manual_evacuation_candidates_selection) { |
653 for (size_t i = 0; i < pages.size(); i++) { | 653 for (size_t i = 0; i < pages.size(); i++) { |
654 Page* p = pages[i].second; | 654 Page* p = pages[i].second; |
655 if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) { | 655 if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) { |
656 candidate_count++; | 656 candidate_count++; |
657 total_live_bytes += pages[i].first; | 657 total_live_bytes += pages[i].first; |
658 p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING); | 658 p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING); |
659 AddEvacuationCandidate(p); | 659 AddEvacuationCandidate(p); |
660 } | 660 } |
661 } | 661 } |
662 } else if (FLAG_stress_compaction) { | 662 } else if (FLAG_stress_compaction) { |
663 for (size_t i = 0; i < pages.size(); i++) { | 663 for (size_t i = 0; i < pages.size(); i++) { |
664 Page* p = pages[i].second; | 664 Page* p = pages[i].second; |
665 if (i % 2 == 0) { | 665 if (i % 2 == 0) { |
666 candidate_count++; | 666 candidate_count++; |
667 total_live_bytes += pages[i].first; | 667 total_live_bytes += pages[i].first; |
668 AddEvacuationCandidate(p); | 668 AddEvacuationCandidate(p); |
669 } | 669 } |
670 } | 670 } |
671 } else { | 671 } else { |
672 const int kTargetFragmentationPercent = 50; | 672 // The following approach determines the pages that should be evacuated. |
673 const int kMaxEvacuatedBytes = 4 * Page::kPageSize; | 673 // |
674 // We use two conditions to decide whether a page qualifies as an evacuation | |
675 // candidate, or not: | |
676 // * Target fragmentation: How fragmented is a page, i.e., how is the ratio | |
677 // between live bytes and capacity of this page (= area). | |
678 // * Evacuation quota: A global quota determining how much bytes should be | |
679 // compacted. | |
680 // | |
681 // The algorithm sorts all pages by live bytes and then iterates through | |
682 // them starting at page with the most free memory, adding them to the set | |
683 // of evacuation candidates as long as both conditions (fragmentation and | |
684 // quota) hold. | |
674 | 685 |
686 // For memory reducing mode we directly define both constants. | |
675 const int kTargetFragmentationPercentForReduceMemory = 20; | 687 const int kTargetFragmentationPercentForReduceMemory = 20; |
676 const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize; | 688 const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize; |
677 | 689 |
690 // For regular mode (which is latency critical) we define less aggressive | |
691 // defaults to start and switch to a trace-based (using compaction speed) | |
692 // approach as soon as we have enough samples. | |
693 const int kTargetFragmentationPercent = 70; | |
Michael Lippautz
2015/11/26 13:19:08
Increased the default from 50 to 70, making it less aggressive.
| |
694 const int kMaxEvacuatedBytes = 4 * Page::kPageSize; | |
695 // Time to take for a single area (=payload of page). Used as soon as there | |
696 // exist enough compaction speed samples. | |
697 const int kTargetMsPerArea = 1; | |
698 | |
678 int max_evacuated_bytes; | 699 int max_evacuated_bytes; |
679 int target_fragmentation_percent; | 700 int target_fragmentation_percent; |
680 | 701 |
681 if (reduce_memory) { | 702 if (reduce_memory) { |
682 target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory; | 703 target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory; |
683 max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory; | 704 max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory; |
684 } else { | 705 } else { |
685 target_fragmentation_percent = kTargetFragmentationPercent; | 706 const intptr_t estimated_compaction_speed = |
707 heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | |
708 if (estimated_compaction_speed != 0) { | |
709 // Estimate the target fragmentation based on traced compaction speed | |
710 // and a goal for a single page. | |
711 const intptr_t estimated_ms_per_area = | |
712 1 + static_cast<intptr_t>(area_size) / estimated_compaction_speed; | |
713 target_fragmentation_percent = | |
714 100 - 100 * kTargetMsPerArea / estimated_ms_per_area; | |
715 if (target_fragmentation_percent < 0) { | |
ulan
2015/11/26 14:15:59
Let's cap it from below by kTargetFragmentationPercent.
Michael Lippautz
2015/11/26 14:36:31
Done. Lower cap is using kTargetFragmentationPercent.
| |
716 target_fragmentation_percent = 0; | |
717 } | |
718 } else { | |
719 target_fragmentation_percent = kTargetFragmentationPercent; | |
720 } | |
686 max_evacuated_bytes = kMaxEvacuatedBytes; | 721 max_evacuated_bytes = kMaxEvacuatedBytes; |
687 } | 722 } |
688 intptr_t free_bytes_threshold = | 723 const intptr_t free_bytes_threshold = |
689 target_fragmentation_percent * (area_size / 100); | 724 target_fragmentation_percent * (area_size / 100); |
690 | 725 |
691 // Sort pages from the most free to the least free, then select | 726 // Sort pages from the most free to the least free, then select |
692 // the first n pages for evacuation such that: | 727 // the first n pages for evacuation such that: |
693 // - the total size of evacuated objects does not exceed the specified | 728 // - the total size of evacuated objects does not exceed the specified |
694 // limit. | 729 // limit. |
695 // - fragmentation of (n+1)-th page does not exceed the specified limit. | 730 // - fragmentation of (n+1)-th page does not exceed the specified limit. |
696 std::sort(pages.begin(), pages.end(), | 731 std::sort(pages.begin(), pages.end(), |
697 [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) { | 732 [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) { |
698 return a.first < b.first; | 733 return a.first < b.first; |
699 }); | 734 }); |
700 for (size_t i = 0; i < pages.size(); i++) { | 735 for (size_t i = 0; i < pages.size(); i++) { |
701 int live_bytes = pages[i].first; | 736 int live_bytes = pages[i].first; |
702 int free_bytes = area_size - live_bytes; | 737 int free_bytes = area_size - live_bytes; |
703 if (FLAG_always_compact || | 738 if (FLAG_always_compact || |
704 (free_bytes >= free_bytes_threshold && | 739 ((free_bytes >= free_bytes_threshold) && |
705 total_live_bytes + live_bytes <= max_evacuated_bytes)) { | 740 ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) { |
706 candidate_count++; | 741 candidate_count++; |
707 total_live_bytes += live_bytes; | 742 total_live_bytes += live_bytes; |
708 } | 743 } |
709 if (FLAG_trace_fragmentation_verbose) { | 744 if (FLAG_trace_fragmentation_verbose) { |
710 PrintF( | 745 PrintIsolate(isolate(), |
711 "Page in %s: %d KB free [fragmented if this >= %d KB], " | 746 "compaction-selection-page: space=%s free_bytes_page=%d " |
712 "sum of live bytes in fragmented pages %d KB [max is %d KB]\n", | 747 "fragmentation_limit_kb=%d fragmentation_limit_percent=%d " |
713 AllocationSpaceName(space->identity()), | 748 "sum_compaction_kb=%d " |
714 static_cast<int>(free_bytes / KB), | 749 "compaction_limit_kb=%d\n", |
715 static_cast<int>(free_bytes_threshold / KB), | 750 AllocationSpaceName(space->identity()), free_bytes / KB, |
716 static_cast<int>(total_live_bytes / KB), | 751 free_bytes_threshold / KB, target_fragmentation_percent, |
717 static_cast<int>(max_evacuated_bytes / KB)); | 752 total_live_bytes / KB, max_evacuated_bytes / KB); |
718 } | 753 } |
719 } | 754 } |
720 // How many pages we will allocate for the evacuated objects | 755 // How many pages we will allocate for the evacuated objects |
721 // in the worst case: ceil(total_live_bytes / area_size) | 756 // in the worst case: ceil(total_live_bytes / area_size) |
722 int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size; | 757 int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size; |
723 DCHECK_LE(estimated_new_pages, candidate_count); | 758 DCHECK_LE(estimated_new_pages, candidate_count); |
724 int estimated_released_pages = candidate_count - estimated_new_pages; | 759 int estimated_released_pages = candidate_count - estimated_new_pages; |
725 // Avoid (compact -> expand) cycles. | 760 // Avoid (compact -> expand) cycles. |
726 if (estimated_released_pages == 0 && !FLAG_always_compact) | 761 if ((estimated_released_pages == 0) && !FLAG_always_compact) { |
727 candidate_count = 0; | 762 candidate_count = 0; |
763 } | |
728 for (int i = 0; i < candidate_count; i++) { | 764 for (int i = 0; i < candidate_count; i++) { |
729 AddEvacuationCandidate(pages[i].second); | 765 AddEvacuationCandidate(pages[i].second); |
730 } | 766 } |
731 } | 767 } |
732 | 768 |
733 if (FLAG_trace_fragmentation) { | 769 if (FLAG_trace_fragmentation) { |
734 PrintF( | 770 PrintIsolate(isolate(), |
735 "Collected %d evacuation candidates [%d KB live] for space %s " | 771 "compaction-selection: space=%s reduce_memory=%d pages=%d " |
736 "[mode %s]\n", | 772 "total_live_bytes=%d\n", |
737 candidate_count, static_cast<int>(total_live_bytes / KB), | 773 AllocationSpaceName(space->identity()), reduce_memory, |
738 AllocationSpaceName(space->identity()), | 774 candidate_count, total_live_bytes / KB); |
739 (reduce_memory ? "reduce memory footprint" : "normal")); | |
740 } | 775 } |
741 } | 776 } |
742 | 777 |
743 | 778 |
744 void MarkCompactCollector::AbortCompaction() { | 779 void MarkCompactCollector::AbortCompaction() { |
745 if (compacting_) { | 780 if (compacting_) { |
746 int npages = evacuation_candidates_.length(); | 781 int npages = evacuation_candidates_.length(); |
747 for (int i = 0; i < npages; i++) { | 782 for (int i = 0; i < npages; i++) { |
748 Page* p = evacuation_candidates_[i]; | 783 Page* p = evacuation_candidates_[i]; |
749 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address()); | 784 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address()); |
(...skipping 3356 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
4106 MarkBit mark_bit = Marking::MarkBitFrom(host); | 4141 MarkBit mark_bit = Marking::MarkBitFrom(host); |
4107 if (Marking::IsBlack(mark_bit)) { | 4142 if (Marking::IsBlack(mark_bit)) { |
4108 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); | 4143 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); |
4109 RecordRelocSlot(&rinfo, target); | 4144 RecordRelocSlot(&rinfo, target); |
4110 } | 4145 } |
4111 } | 4146 } |
4112 } | 4147 } |
4113 | 4148 |
4114 } // namespace internal | 4149 } // namespace internal |
4115 } // namespace v8 | 4150 } // namespace v8 |
OLD | NEW |