Chromium Code Reviews

Unified Diff: src/heap/mark-compact.cc

Issue 1480553004: [heap] Tweak evacuation candidate selection. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Factored out computing heuristics (created 5 years ago)
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/mark-compact.h"

 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/sys-info.h"
 #include "src/code-stubs.h"
(... skipping 596 matching lines ...)
     case LO_SPACE:
       return "LO_SPACE";
     default:
       UNREACHABLE();
   }

   return NULL;
 }


+void MarkCompactCollector::ComputeEvacuationHeuristics(
+    int area_size, int* target_fragmentation_percent,
+    int* max_evacuated_bytes) {
+  // For memory reducing mode we directly define both constants.
+  const int kTargetFragmentationPercentForReduceMemory = 20;
+  const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
+
+  // For regular mode (which is latency critical) we define less aggressive
+  // defaults to start and switch to a trace-based (using compaction speed)
+  // approach as soon as we have enough samples.
+  const int kTargetFragmentationPercent = 70;
+  const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
+  // Time to take for a single area (=payload of page). Used as soon as there
+  // exist enough compaction speed samples.
+  const int kTargetMsPerArea = 1;
+
+  if (heap()->ShouldReduceMemory()) {
+    *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
+    *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
+  } else {
+    const intptr_t estimated_compaction_speed =
+        heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
+    if (estimated_compaction_speed != 0) {
+      // Estimate the target fragmentation based on traced compaction speed
+      // and a goal for a single page.
+      const intptr_t estimated_ms_per_area =
+          1 + static_cast<intptr_t>(area_size) / estimated_compaction_speed;
+      *target_fragmentation_percent =
+          100 - 100 * kTargetMsPerArea / estimated_ms_per_area;
+      if (*target_fragmentation_percent <
+          kTargetFragmentationPercentForReduceMemory) {
+        *target_fragmentation_percent =
+            kTargetFragmentationPercentForReduceMemory;
+      }
+    } else {
+      *target_fragmentation_percent = kTargetFragmentationPercent;
+    }
+    *max_evacuated_bytes = kMaxEvacuatedBytes;
+  }
+}
+
+
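For reference, a minimal standalone sketch of the trace-based heuristic added above. The 20/70 percent defaults, the 1 ms per-area budget, and the clamping mirror the constants in the new function; the 1 MB area size, the ComputeHeuristicsSketch name, and the main() driver are invented for illustration and are not part of the patch or of V8.

// sketch_evacuation_heuristics.cc -- standalone sketch, not V8 code.
#include <cstdint>
#include <cstdio>

// Assumed page payload size for illustration only (V8 uses Page::kPageSize).
static const int kAreaSize = 1 << 20;  // 1 MB

// Mirrors the shape of ComputeEvacuationHeuristics above: choose a
// fragmentation threshold (percent of the page that must be free) and a
// global byte quota for one GC's evacuation.
static void ComputeHeuristicsSketch(int area_size, int64_t speed_bytes_per_ms,
                                    bool reduce_memory,
                                    int* target_fragmentation_percent,
                                    int* max_evacuated_bytes) {
  const int kTargetFragmentationPercentForReduceMemory = 20;
  const int kMaxEvacuatedBytesForReduceMemory = 12 * kAreaSize;
  const int kTargetFragmentationPercent = 70;
  const int kMaxEvacuatedBytes = 4 * kAreaSize;
  const int kTargetMsPerArea = 1;  // budget: ~1 ms of compaction per page area

  if (reduce_memory) {
    // Memory-reducing GCs compact aggressively: a page only needs to be
    // 20% free, and up to 12 page areas worth of live bytes may be moved.
    *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
    *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
    return;
  }
  if (speed_bytes_per_ms != 0) {
    // With a traced compaction speed, derive the threshold from the time one
    // page area would take: the slower the compaction, the emptier a page
    // must be before evacuating it pays off within the ~1 ms budget.
    const int64_t estimated_ms_per_area = 1 + area_size / speed_bytes_per_ms;
    int percent =
        static_cast<int>(100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
    if (percent < kTargetFragmentationPercentForReduceMemory) {
      percent = kTargetFragmentationPercentForReduceMemory;
    }
    *target_fragmentation_percent = percent;
  } else {
    // No samples yet: fall back to the conservative 70% default.
    *target_fragmentation_percent = kTargetFragmentationPercent;
  }
  *max_evacuated_bytes = kMaxEvacuatedBytes;
}

int main() {
  int percent, quota;
  // Fast compaction (2 MB/ms): estimated_ms_per_area = 1, clamps to 20%.
  ComputeHeuristicsSketch(kAreaSize, 2 * (1 << 20), false, &percent, &quota);
  std::printf("fast: %d%% free required, quota %d bytes\n", percent, quota);
  // Slow compaction (100 KB/ms): estimated_ms_per_area = 11, threshold 91%.
  ComputeHeuristicsSketch(kAreaSize, 100 * 1024, false, &percent, &quota);
  std::printf("slow: %d%% free required, quota %d bytes\n", percent, quota);
  return 0;
}

The effect is that slow measured compaction raises the free-space bar so fewer pages qualify, while fast compaction relaxes it down to the same 20% floor used in memory-reducing mode.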
 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);

   int number_of_pages = space->CountTotalPages();
   int area_size = space->AreaSize();

   // Pairs of (live_bytes_in_page, page).
   typedef std::pair<int, Page*> LiveBytesPagePair;
   std::vector<LiveBytesPagePair> pages;
   pages.reserve(number_of_pages);
(... skipping 14 matching lines ...)
     CHECK(p->slots_buffer() == NULL);
     DCHECK(p->area_size() == area_size);
     int live_bytes =
         p->WasSwept() ? p->LiveBytesFromFreeList() : p->LiveBytes();
     pages.push_back(std::make_pair(live_bytes, p));
   }

   int candidate_count = 0;
   int total_live_bytes = 0;

-  bool reduce_memory = heap()->ShouldReduceMemory();
+  const bool reduce_memory = heap()->ShouldReduceMemory();
   if (FLAG_manual_evacuation_candidates_selection) {
     for (size_t i = 0; i < pages.size(); i++) {
       Page* p = pages[i].second;
       if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
         candidate_count++;
         total_live_bytes += pages[i].first;
         p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
         AddEvacuationCandidate(p);
       }
     }
   } else if (FLAG_stress_compaction) {
     for (size_t i = 0; i < pages.size(); i++) {
       Page* p = pages[i].second;
       if (i % 2 == 0) {
         candidate_count++;
         total_live_bytes += pages[i].first;
         AddEvacuationCandidate(p);
       }
     }
   } else {
-    const int kTargetFragmentationPercent = 50;
-    const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
-
-    const int kTargetFragmentationPercentForReduceMemory = 20;
-    const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
-
+    // The following approach determines the pages that should be evacuated.
+    //
+    // We use two conditions to decide whether a page qualifies as an
+    // evacuation candidate or not:
+    // * Target fragmentation: how fragmented a page is, i.e., the ratio
+    //   between live bytes and the capacity of the page (= area).
+    // * Evacuation quota: a global quota determining how many bytes should
+    //   be compacted.
+    //
+    // The algorithm sorts all pages by live bytes and then iterates over
+    // them, starting with the page with the most free memory, adding pages
+    // to the set of evacuation candidates as long as both conditions
+    // (fragmentation and quota) hold.
     int max_evacuated_bytes;
     int target_fragmentation_percent;
+    ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
+                                &max_evacuated_bytes);

-    if (reduce_memory) {
-      target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
-      max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
-    } else {
-      target_fragmentation_percent = kTargetFragmentationPercent;
-      max_evacuated_bytes = kMaxEvacuatedBytes;
-    }
-    intptr_t free_bytes_threshold =
+    const intptr_t free_bytes_threshold =
         target_fragmentation_percent * (area_size / 100);

     // Sort pages from the most free to the least free, then select
     // the first n pages for evacuation such that:
     // - the total size of evacuated objects does not exceed the specified
     //   limit.
     // - fragmentation of (n+1)-th page does not exceed the specified limit.
     std::sort(pages.begin(), pages.end(),
               [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
                 return a.first < b.first;
               });
     for (size_t i = 0; i < pages.size(); i++) {
       int live_bytes = pages[i].first;
       int free_bytes = area_size - live_bytes;
       if (FLAG_always_compact ||
-          (free_bytes >= free_bytes_threshold &&
-           total_live_bytes + live_bytes <= max_evacuated_bytes)) {
+          ((free_bytes >= free_bytes_threshold) &&
+           ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
         candidate_count++;
         total_live_bytes += live_bytes;
       }
       if (FLAG_trace_fragmentation_verbose) {
-        PrintF(
-            "Page in %s: %d KB free [fragmented if this >= %d KB], "
-            "sum of live bytes in fragmented pages %d KB [max is %d KB]\n",
-            AllocationSpaceName(space->identity()),
-            static_cast<int>(free_bytes / KB),
-            static_cast<int>(free_bytes_threshold / KB),
-            static_cast<int>(total_live_bytes / KB),
-            static_cast<int>(max_evacuated_bytes / KB));
+        PrintIsolate(isolate(),
+                     "compaction-selection-page: space=%s free_bytes_page=%d "
+                     "fragmentation_limit_kb=%d fragmentation_limit_percent=%d "
+                     "sum_compaction_kb=%d "
+                     "compaction_limit_kb=%d\n",
+                     AllocationSpaceName(space->identity()), free_bytes / KB,
+                     free_bytes_threshold / KB, target_fragmentation_percent,
+                     total_live_bytes / KB, max_evacuated_bytes / KB);
       }
     }
     // How many pages we will allocate for the evacuated objects
     // in the worst case: ceil(total_live_bytes / area_size)
     int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size;
     DCHECK_LE(estimated_new_pages, candidate_count);
     int estimated_released_pages = candidate_count - estimated_new_pages;
     // Avoid (compact -> expand) cycles.
-    if (estimated_released_pages == 0 && !FLAG_always_compact)
+    if ((estimated_released_pages == 0) && !FLAG_always_compact) {
       candidate_count = 0;
+    }
     for (int i = 0; i < candidate_count; i++) {
       AddEvacuationCandidate(pages[i].second);
     }
   }

   if (FLAG_trace_fragmentation) {
-    PrintF(
-        "Collected %d evacuation candidates [%d KB live] for space %s "
-        "[mode %s]\n",
-        candidate_count, static_cast<int>(total_live_bytes / KB),
-        AllocationSpaceName(space->identity()),
-        (reduce_memory ? "reduce memory footprint" : "normal"));
+    PrintIsolate(isolate(),
+                 "compaction-selection: space=%s reduce_memory=%d pages=%d "
+                 "total_live_bytes=%d\n",
+                 AllocationSpaceName(space->identity()), reduce_memory,
+                 candidate_count, total_live_bytes / KB);
   }
 }
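A similarly hedged standalone sketch of the selection loop above: pages are sorted by live bytes so the most-free page comes first, then counted as candidates while both the per-page fragmentation threshold and the global evacuation quota hold. Plain (live_bytes, page_id) pairs stand in for the Page* vector, the flags and tracing are dropped, and kAreaSize, SelectCandidates, and the numbers in main() are made-up examples, not V8 code.

// sketch_candidate_selection.cc -- standalone sketch of the selection loop.
#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

static const int kAreaSize = 1 << 20;  // assumed 1 MB page area

// Returns the number of candidate pages: sort by live bytes (most free
// first), then take pages while both the per-page fragmentation condition
// and the global byte quota still hold.
static int SelectCandidates(std::vector<std::pair<int, int> >* pages,
                            int target_fragmentation_percent,
                            int max_evacuated_bytes) {
  const int free_bytes_threshold =
      target_fragmentation_percent * (kAreaSize / 100);
  // Ascending live bytes == descending free bytes.
  std::sort(pages->begin(), pages->end());
  int candidate_count = 0;
  int total_live_bytes = 0;
  for (const auto& entry : *pages) {
    const int live_bytes = entry.first;
    const int free_bytes = kAreaSize - live_bytes;
    if (free_bytes >= free_bytes_threshold &&
        total_live_bytes + live_bytes <= max_evacuated_bytes) {
      candidate_count++;
      total_live_bytes += live_bytes;
    }
  }
  // Worst-case pages needed for the moved objects: ceil(total / area).
  const int estimated_new_pages =
      (total_live_bytes + kAreaSize - 1) / kAreaSize;
  // Avoid (compact -> expand) cycles: bail out if compaction frees no pages.
  if (candidate_count - estimated_new_pages == 0) return 0;
  return candidate_count;
}

int main() {
  // Three mostly-empty pages and one nearly full page (live bytes, page id).
  std::vector<std::pair<int, int> > pages = {
      {64 * 1024, 0}, {128 * 1024, 1}, {200 * 1024, 2}, {900 * 1024, 3}};
  // Example thresholds: 70% of a page must be free, quota of 4 page areas.
  const int selected = SelectCandidates(&pages, 70, 4 * kAreaSize);
  std::printf("selected %d evacuation candidates\n", selected);
  return 0;
}

Because the vector is sorted by live bytes, the selected pages are always a prefix of it, which is why the real code can simply add the first candidate_count pages as evacuation candidates after the counting pass.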


 void MarkCompactCollector::AbortCompaction() {
   if (compacting_) {
     int npages = evacuation_candidates_.length();
     for (int i = 0; i < npages; i++) {
       Page* p = evacuation_candidates_[i];
       slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
(... skipping 3356 matching lines ...)
     MarkBit mark_bit = Marking::MarkBitFrom(host);
     if (Marking::IsBlack(mark_bit)) {
       RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
       RecordRelocSlot(&rinfo, target);
     }
   }
 }

 }  // namespace internal
 }  // namespace v8
